2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
9 * Copyright (C) 2006 Qumranet, Inc.
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
34 #include <asm/cmpxchg.h>
38 * Setting this variable to true enables Two-Dimensional Paging (TDP),
39 * where the hardware walks 2 page tables:
40 * 1. the guest-virtual to guest-physical
41 * 2. while doing 1, the guest-physical to host-physical
42 * If the hardware supports that, we don't need to do shadow paging.
44 bool tdp_enabled = false;
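/*
 * Illustrative note (editorial addition, not part of the original file):
 * when tdp_enabled is true, init_kvm_mmu() below picks init_kvm_tdp_mmu()
 * instead of init_kvm_softmmu(), so guest page faults are handled by
 * tdp_page_fault() and no shadow page tables mirroring the guest's own
 * paging structures are built.
 */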
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
53 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
58 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
63 #define pgprintk(x...) do { } while (0)
64 #define rmap_printk(x...) do { } while (0)
68 #if defined(MMU_DEBUG) || defined(AUDIT)
73 #define ASSERT(x) do { } while (0)
77 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
78 __FILE__, __LINE__, #x); \
82 #define PT64_PT_BITS 9
83 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
84 #define PT32_PT_BITS 10
85 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
87 #define PT_WRITABLE_SHIFT 1
89 #define PT_PRESENT_MASK (1ULL << 0)
90 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
91 #define PT_USER_MASK (1ULL << 2)
92 #define PT_PWT_MASK (1ULL << 3)
93 #define PT_PCD_MASK (1ULL << 4)
94 #define PT_ACCESSED_MASK (1ULL << 5)
95 #define PT_DIRTY_MASK (1ULL << 6)
96 #define PT_PAGE_SIZE_MASK (1ULL << 7)
97 #define PT_PAT_MASK (1ULL << 7)
98 #define PT_GLOBAL_MASK (1ULL << 8)
99 #define PT64_NX_SHIFT 63
100 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
102 #define PT_PAT_SHIFT 7
103 #define PT_DIR_PAT_SHIFT 12
104 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
106 #define PT32_DIR_PSE36_SIZE 4
107 #define PT32_DIR_PSE36_SHIFT 13
108 #define PT32_DIR_PSE36_MASK \
109 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
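/*
 * Illustrative note (editorial addition): with PSE36, bits 13-16 of a
 * 32-bit large-page PDE carry physical address bits 32-35.
 * PT32_DIR_PSE36_MASK selects exactly those four bits, and
 * pse36_gfn_delta() below converts them into an offset expressed in
 * page-frame numbers.
 */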
112 #define PT_FIRST_AVAIL_BITS_SHIFT 9
113 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
115 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
117 #define PT64_LEVEL_BITS 9
119 #define PT64_LEVEL_SHIFT(level) \
120 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
122 #define PT64_LEVEL_MASK(level) \
123 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
125 #define PT64_INDEX(address, level)\
126 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
129 #define PT32_LEVEL_BITS 10
131 #define PT32_LEVEL_SHIFT(level) \
132 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
134 #define PT32_LEVEL_MASK(level) \
135 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
137 #define PT32_INDEX(address, level)\
138 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
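/*
 * Illustrative note (editorial addition): with 4KB pages (PAGE_SHIFT == 12)
 * the 64-bit macros slice an address into 9-bit indexes: PT64_INDEX(addr, 1)
 * uses bits 12-20, level 2 uses bits 21-29, level 3 bits 30-38 and level 4
 * bits 39-47.  The 32-bit variants use 10-bit indexes (level 1: bits 12-21,
 * level 2: bits 22-31).
 */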
141 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
142 #define PT64_DIR_BASE_ADDR_MASK \
143 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
145 #define PT32_BASE_ADDR_MASK PAGE_MASK
146 #define PT32_DIR_BASE_ADDR_MASK \
147 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
149 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
152 #define PFERR_PRESENT_MASK (1U << 0)
153 #define PFERR_WRITE_MASK (1U << 1)
154 #define PFERR_USER_MASK (1U << 2)
155 #define PFERR_FETCH_MASK (1U << 4)
157 #define PT64_ROOT_LEVEL 4
158 #define PT32_ROOT_LEVEL 2
159 #define PT32E_ROOT_LEVEL 3
161 #define PT_DIRECTORY_LEVEL 2
162 #define PT_PAGE_TABLE_LEVEL 1
166 #define ACC_EXEC_MASK 1
167 #define ACC_WRITE_MASK PT_WRITABLE_MASK
168 #define ACC_USER_MASK PT_USER_MASK
169 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
171 struct kvm_pv_mmu_op_buffer {
175 char buf[512] __aligned(sizeof(long));
178 struct kvm_rmap_desc {
179 u64 *shadow_ptes[RMAP_EXT];
180 struct kvm_rmap_desc *more;
183 static struct kmem_cache *pte_chain_cache;
184 static struct kmem_cache *rmap_desc_cache;
185 static struct kmem_cache *mmu_page_header_cache;
187 static u64 __read_mostly shadow_trap_nonpresent_pte;
188 static u64 __read_mostly shadow_notrap_nonpresent_pte;
190 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
192 shadow_trap_nonpresent_pte = trap_pte;
193 shadow_notrap_nonpresent_pte = notrap_pte;
195 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
197 static int is_write_protection(struct kvm_vcpu *vcpu)
199 return vcpu->arch.cr0 & X86_CR0_WP;
202 static int is_cpuid_PSE36(void)
207 static int is_nx(struct kvm_vcpu *vcpu)
209 return vcpu->arch.shadow_efer & EFER_NX;
212 static int is_present_pte(unsigned long pte)
214 return pte & PT_PRESENT_MASK;
217 static int is_shadow_present_pte(u64 pte)
219 return pte != shadow_trap_nonpresent_pte
220 && pte != shadow_notrap_nonpresent_pte;
223 static int is_large_pte(u64 pte)
225 return pte & PT_PAGE_SIZE_MASK;
228 static int is_writeble_pte(unsigned long pte)
230 return pte & PT_WRITABLE_MASK;
233 static int is_dirty_pte(unsigned long pte)
235 return pte & PT_DIRTY_MASK;
238 static int is_rmap_pte(u64 pte)
240 return is_shadow_present_pte(pte);
243 static gfn_t pse36_gfn_delta(u32 gpte)
245 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
247 return (gpte & PT32_DIR_PSE36_MASK) << shift;
250 static void set_shadow_pte(u64 *sptep, u64 spte)
253 set_64bit((unsigned long *)sptep, spte);
255 set_64bit((unsigned long long *)sptep, spte);
259 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
260 struct kmem_cache *base_cache, int min)
264 if (cache->nobjs >= min)
266 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
267 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
270 cache->objects[cache->nobjs++] = obj;
275 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
278 kfree(mc->objects[--mc->nobjs]);
281 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
286 if (cache->nobjs >= min)
288 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
289 page = alloc_page(GFP_KERNEL);
292 set_page_private(page, 0);
293 cache->objects[cache->nobjs++] = page_address(page);
298 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
301 free_page((unsigned long)mc->objects[--mc->nobjs]);
304 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
308 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
312 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
316 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
319 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
320 mmu_page_header_cache, 4);
325 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
327 mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
328 mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
329 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
330 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
333 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
339 p = mc->objects[--mc->nobjs];
344 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
346 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
347 sizeof(struct kvm_pte_chain));
350 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
355 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
357 return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
358 sizeof(struct kvm_rmap_desc));
361 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
367 * Return the pointer to the largepage write count for a given
368 * gfn, handling slots that are not large page aligned.
370 static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
374 idx = (gfn / KVM_PAGES_PER_HPAGE) -
375 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
376 return &slot->lpage_info[idx].write_count;
379 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
383 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
385 WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
388 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
392 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
394 WARN_ON(*write_count < 0);
397 static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
399 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
403 largepage_idx = slot_largepage_idx(gfn, slot);
404 return *largepage_idx;
410 static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
412 struct vm_area_struct *vma;
415 addr = gfn_to_hva(kvm, gfn);
416 if (kvm_is_error_hva(addr))
419 vma = find_vma(current->mm, addr);
420 if (vma && is_vm_hugetlb_page(vma))
426 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
428 struct kvm_memory_slot *slot;
430 if (has_wrprotected_page(vcpu->kvm, large_gfn))
433 if (!host_largepage_backed(vcpu->kvm, large_gfn))
436 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
437 if (slot && slot->dirty_bitmap)
444 * Take gfn and return the reverse mapping to it.
445 * Note: gfn must be unaliased before this function gets called
448 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
450 struct kvm_memory_slot *slot;
453 slot = gfn_to_memslot(kvm, gfn);
455 return &slot->rmap[gfn - slot->base_gfn];
457 idx = (gfn / KVM_PAGES_PER_HPAGE) -
458 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
460 return &slot->lpage_info[idx].rmap_pde;
464 * Reverse mapping data structures:
466 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
467 * that points to page_address(page).
469 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
470 * containing more mappings.
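/*
 * A minimal sketch of the encoding described above (editorial addition;
 * the helper below is hypothetical and is not used anywhere in this file).
 */
static inline struct kvm_rmap_desc *rmapp_to_desc(unsigned long rmapp)
{
	if (!(rmapp & 1))
		return NULL;	/* low bit clear: rmapp holds a single spte pointer */
	return (struct kvm_rmap_desc *)(rmapp & ~1ul);
}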
472 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
474 struct kvm_mmu_page *sp;
475 struct kvm_rmap_desc *desc;
476 unsigned long *rmapp;
479 if (!is_rmap_pte(*spte))
481 gfn = unalias_gfn(vcpu->kvm, gfn);
482 sp = page_header(__pa(spte));
483 sp->gfns[spte - sp->spt] = gfn;
484 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
486 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
487 *rmapp = (unsigned long)spte;
488 } else if (!(*rmapp & 1)) {
489 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
490 desc = mmu_alloc_rmap_desc(vcpu);
491 desc->shadow_ptes[0] = (u64 *)*rmapp;
492 desc->shadow_ptes[1] = spte;
493 *rmapp = (unsigned long)desc | 1;
495 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
496 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
497 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
499 if (desc->shadow_ptes[RMAP_EXT-1]) {
500 desc->more = mmu_alloc_rmap_desc(vcpu);
503 for (i = 0; desc->shadow_ptes[i]; ++i)
505 desc->shadow_ptes[i] = spte;
509 static void rmap_desc_remove_entry(unsigned long *rmapp,
510 struct kvm_rmap_desc *desc,
512 struct kvm_rmap_desc *prev_desc)
516 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
518 desc->shadow_ptes[i] = desc->shadow_ptes[j];
519 desc->shadow_ptes[j] = NULL;
522 if (!prev_desc && !desc->more)
523 *rmapp = (unsigned long)desc->shadow_ptes[0];
526 prev_desc->more = desc->more;
528 *rmapp = (unsigned long)desc->more | 1;
529 mmu_free_rmap_desc(desc);
532 static void rmap_remove(struct kvm *kvm, u64 *spte)
534 struct kvm_rmap_desc *desc;
535 struct kvm_rmap_desc *prev_desc;
536 struct kvm_mmu_page *sp;
538 unsigned long *rmapp;
541 if (!is_rmap_pte(*spte))
543 sp = page_header(__pa(spte));
544 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
545 mark_page_accessed(page);
546 if (is_writeble_pte(*spte))
547 kvm_release_page_dirty(page);
549 kvm_release_page_clean(page);
550 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
552 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
554 } else if (!(*rmapp & 1)) {
555 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
556 if ((u64 *)*rmapp != spte) {
557 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
563 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
564 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
567 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
568 if (desc->shadow_ptes[i] == spte) {
569 rmap_desc_remove_entry(rmapp,
581 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
583 struct kvm_rmap_desc *desc;
584 struct kvm_rmap_desc *prev_desc;
590 else if (!(*rmapp & 1)) {
592 return (u64 *)*rmapp;
595 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
599 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
600 if (prev_spte == spte)
601 return desc->shadow_ptes[i];
602 prev_spte = desc->shadow_ptes[i];
609 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
611 unsigned long *rmapp;
613 int write_protected = 0;
615 gfn = unalias_gfn(kvm, gfn);
616 rmapp = gfn_to_rmap(kvm, gfn, 0);
618 spte = rmap_next(kvm, rmapp, NULL);
621 BUG_ON(!(*spte & PT_PRESENT_MASK));
622 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
623 if (is_writeble_pte(*spte)) {
624 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
627 spte = rmap_next(kvm, rmapp, spte);
629 /* check for huge page mappings */
630 rmapp = gfn_to_rmap(kvm, gfn, 1);
631 spte = rmap_next(kvm, rmapp, NULL);
634 BUG_ON(!(*spte & PT_PRESENT_MASK));
635 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
636 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
637 if (is_writeble_pte(*spte)) {
638 rmap_remove(kvm, spte);
640 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
643 spte = rmap_next(kvm, rmapp, spte);
647 kvm_flush_remote_tlbs(kvm);
649 account_shadowed(kvm, gfn);
653 static int is_empty_shadow_page(u64 *spt)
658 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
659 if (*pos != shadow_trap_nonpresent_pte) {
660 printk(KERN_ERR "%s: %p %llx\n", __func__,
668 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
670 ASSERT(is_empty_shadow_page(sp->spt));
672 __free_page(virt_to_page(sp->spt));
673 __free_page(virt_to_page(sp->gfns));
675 ++kvm->arch.n_free_mmu_pages;
678 static unsigned kvm_page_table_hashfn(gfn_t gfn)
680 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
683 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
686 struct kvm_mmu_page *sp;
688 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
689 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
690 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
691 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
692 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
693 ASSERT(is_empty_shadow_page(sp->spt));
696 sp->parent_pte = parent_pte;
697 --vcpu->kvm->arch.n_free_mmu_pages;
701 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
702 struct kvm_mmu_page *sp, u64 *parent_pte)
704 struct kvm_pte_chain *pte_chain;
705 struct hlist_node *node;
710 if (!sp->multimapped) {
711 u64 *old = sp->parent_pte;
714 sp->parent_pte = parent_pte;
718 pte_chain = mmu_alloc_pte_chain(vcpu);
719 INIT_HLIST_HEAD(&sp->parent_ptes);
720 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
721 pte_chain->parent_ptes[0] = old;
723 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
724 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
726 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
727 if (!pte_chain->parent_ptes[i]) {
728 pte_chain->parent_ptes[i] = parent_pte;
732 pte_chain = mmu_alloc_pte_chain(vcpu);
734 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
735 pte_chain->parent_ptes[0] = parent_pte;
738 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
741 struct kvm_pte_chain *pte_chain;
742 struct hlist_node *node;
745 if (!sp->multimapped) {
746 BUG_ON(sp->parent_pte != parent_pte);
747 sp->parent_pte = NULL;
750 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
751 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
752 if (!pte_chain->parent_ptes[i])
754 if (pte_chain->parent_ptes[i] != parent_pte)
756 while (i + 1 < NR_PTE_CHAIN_ENTRIES
757 && pte_chain->parent_ptes[i + 1]) {
758 pte_chain->parent_ptes[i]
759 = pte_chain->parent_ptes[i + 1];
762 pte_chain->parent_ptes[i] = NULL;
764 hlist_del(&pte_chain->link);
765 mmu_free_pte_chain(pte_chain);
766 if (hlist_empty(&sp->parent_ptes)) {
768 sp->parent_pte = NULL;
776 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
779 struct hlist_head *bucket;
780 struct kvm_mmu_page *sp;
781 struct hlist_node *node;
783 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
784 index = kvm_page_table_hashfn(gfn);
785 bucket = &kvm->arch.mmu_page_hash[index];
786 hlist_for_each_entry(sp, node, bucket, hash_link)
787 if (sp->gfn == gfn && !sp->role.metaphysical
788 && !sp->role.invalid) {
789 pgprintk("%s: found role %x\n",
790 __func__, sp->role.word);
796 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
804 union kvm_mmu_page_role role;
807 struct hlist_head *bucket;
808 struct kvm_mmu_page *sp;
809 struct hlist_node *node;
812 role.glevels = vcpu->arch.mmu.root_level;
814 role.metaphysical = metaphysical;
815 role.access = access;
816 if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
817 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
818 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
819 role.quadrant = quadrant;
821 pgprintk("%s: looking gfn %lx role %x\n", __func__,
823 index = kvm_page_table_hashfn(gfn);
824 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
825 hlist_for_each_entry(sp, node, bucket, hash_link)
826 if (sp->gfn == gfn && sp->role.word == role.word) {
827 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
828 pgprintk("%s: found\n", __func__);
831 ++vcpu->kvm->stat.mmu_cache_miss;
832 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
835 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
838 hlist_add_head(&sp->hash_link, bucket);
839 vcpu->arch.mmu.prefetch_page(vcpu, sp);
841 rmap_write_protect(vcpu->kvm, gfn);
845 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
846 struct kvm_mmu_page *sp)
854 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
855 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
856 if (is_shadow_present_pte(pt[i]))
857 rmap_remove(kvm, &pt[i]);
858 pt[i] = shadow_trap_nonpresent_pte;
860 kvm_flush_remote_tlbs(kvm);
864 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
867 if (is_shadow_present_pte(ent)) {
868 if (!is_large_pte(ent)) {
869 ent &= PT64_BASE_ADDR_MASK;
870 mmu_page_remove_parent_pte(page_header(ent),
874 rmap_remove(kvm, &pt[i]);
877 pt[i] = shadow_trap_nonpresent_pte;
879 kvm_flush_remote_tlbs(kvm);
882 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
884 mmu_page_remove_parent_pte(sp, parent_pte);
887 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
891 for (i = 0; i < KVM_MAX_VCPUS; ++i)
893 kvm->vcpus[i]->arch.last_pte_updated = NULL;
896 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
900 ++kvm->stat.mmu_shadow_zapped;
901 while (sp->multimapped || sp->parent_pte) {
902 if (!sp->multimapped)
903 parent_pte = sp->parent_pte;
905 struct kvm_pte_chain *chain;
907 chain = container_of(sp->parent_ptes.first,
908 struct kvm_pte_chain, link);
909 parent_pte = chain->parent_ptes[0];
912 kvm_mmu_put_page(sp, parent_pte);
913 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
915 kvm_mmu_page_unlink_children(kvm, sp);
916 if (!sp->root_count) {
917 if (!sp->role.metaphysical)
918 unaccount_shadowed(kvm, sp->gfn);
919 hlist_del(&sp->hash_link);
920 kvm_mmu_free_page(kvm, sp);
922 list_move(&sp->link, &kvm->arch.active_mmu_pages);
923 sp->role.invalid = 1;
924 kvm_reload_remote_mmus(kvm);
926 kvm_mmu_reset_last_pte_updated(kvm);
930 * Changing the number of mmu pages allocated to the vm
931 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
933 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
936 * If we set the number of mmu pages to be smaller than the
937 * number of active pages, we must free some mmu pages before we
941 if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
943 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
944 - kvm->arch.n_free_mmu_pages;
946 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
947 struct kvm_mmu_page *page;
949 page = container_of(kvm->arch.active_mmu_pages.prev,
950 struct kvm_mmu_page, link);
951 kvm_mmu_zap_page(kvm, page);
954 kvm->arch.n_free_mmu_pages = 0;
957 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
958 - kvm->arch.n_alloc_mmu_pages;
960 kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
963 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
966 struct hlist_head *bucket;
967 struct kvm_mmu_page *sp;
968 struct hlist_node *node, *n;
971 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
973 index = kvm_page_table_hashfn(gfn);
974 bucket = &kvm->arch.mmu_page_hash[index];
975 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
976 if (sp->gfn == gfn && !sp->role.metaphysical) {
977 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
979 kvm_mmu_zap_page(kvm, sp);
985 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
987 struct kvm_mmu_page *sp;
989 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
990 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
991 kvm_mmu_zap_page(kvm, sp);
995 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
997 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
998 struct kvm_mmu_page *sp = page_header(__pa(pte));
1000 __set_bit(slot, &sp->slot_bitmap);
1003 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1007 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1009 if (gpa == UNMAPPED_GVA)
1012 down_read(&current->mm->mmap_sem);
1013 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1014 up_read(&current->mm->mmap_sem);
1019 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1020 unsigned pt_access, unsigned pte_access,
1021 int user_fault, int write_fault, int dirty,
1022 int *ptwrite, int largepage, gfn_t gfn,
1023 struct page *page, bool speculative)
1026 int was_rmapped = 0;
1027 int was_writeble = is_writeble_pte(*shadow_pte);
1028 hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1030 pgprintk("%s: spte %llx access %x write_fault %d"
1031 " user_fault %d gfn %lx\n",
1032 __func__, *shadow_pte, pt_access,
1033 write_fault, user_fault, gfn);
1035 if (is_rmap_pte(*shadow_pte)) {
1037 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1038 * the parent of the now unreachable PTE.
1040 if (largepage && !is_large_pte(*shadow_pte)) {
1041 struct kvm_mmu_page *child;
1042 u64 pte = *shadow_pte;
1044 child = page_header(pte & PT64_BASE_ADDR_MASK);
1045 mmu_page_remove_parent_pte(child, shadow_pte);
1046 } else if (host_pfn != page_to_pfn(page)) {
1047 pgprintk("hfn old %lx new %lx\n",
1048 host_pfn, page_to_pfn(page));
1049 rmap_remove(vcpu->kvm, shadow_pte);
1052 was_rmapped = is_large_pte(*shadow_pte);
1059 * We don't set the accessed bit, since we sometimes want to see
1060 * whether the guest actually used the pte (in order to detect
1063 spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
1065 pte_access |= PT_ACCESSED_MASK;
1067 pte_access &= ~ACC_WRITE_MASK;
1068 if (!(pte_access & ACC_EXEC_MASK))
1069 spte |= PT64_NX_MASK;
1071 spte |= PT_PRESENT_MASK;
1072 if (pte_access & ACC_USER_MASK)
1073 spte |= PT_USER_MASK;
1075 spte |= PT_PAGE_SIZE_MASK;
1077 spte |= page_to_phys(page);
1079 if ((pte_access & ACC_WRITE_MASK)
1080 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1081 struct kvm_mmu_page *shadow;
1083 spte |= PT_WRITABLE_MASK;
1085 mmu_unshadow(vcpu->kvm, gfn);
1089 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1091 (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
1092 pgprintk("%s: found shadow page for %lx, marking ro\n",
1094 pte_access &= ~ACC_WRITE_MASK;
1095 if (is_writeble_pte(spte)) {
1096 spte &= ~PT_WRITABLE_MASK;
1097 kvm_x86_ops->tlb_flush(vcpu);
1106 if (pte_access & ACC_WRITE_MASK)
1107 mark_page_dirty(vcpu->kvm, gfn);
1109 pgprintk("%s: setting spte %llx\n", __func__, spte);
1110 pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
1111 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
1112 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
1113 set_shadow_pte(shadow_pte, spte);
1114 if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1115 && (spte & PT_PRESENT_MASK))
1116 ++vcpu->kvm->stat.lpages;
1118 page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1120 rmap_add(vcpu, shadow_pte, gfn, largepage);
1121 if (!is_rmap_pte(*shadow_pte))
1122 kvm_release_page_clean(page);
1125 kvm_release_page_dirty(page);
1127 kvm_release_page_clean(page);
1129 if (!ptwrite || !*ptwrite)
1130 vcpu->arch.last_pte_updated = shadow_pte;
1133 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1137 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1138 int largepage, gfn_t gfn, struct page *page,
1141 hpa_t table_addr = vcpu->arch.mmu.root_hpa;
1145 u32 index = PT64_INDEX(v, level);
1148 ASSERT(VALID_PAGE(table_addr));
1149 table = __va(table_addr);
1152 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1153 0, write, 1, &pt_write, 0, gfn, page, false);
1157 if (largepage && level == 2) {
1158 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1159 0, write, 1, &pt_write, 1, gfn, page, false);
1163 if (table[index] == shadow_trap_nonpresent_pte) {
1164 struct kvm_mmu_page *new_table;
1167 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
1169 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
1171 1, ACC_ALL, &table[index]);
1173 pgprintk("nonpaging_map: ENOMEM\n");
1174 kvm_release_page_clean(page);
1178 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
1179 | PT_WRITABLE_MASK | PT_USER_MASK;
1181 table_addr = table[index] & PT64_BASE_ADDR_MASK;
1185 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1192 down_read(&vcpu->kvm->slots_lock);
1194 down_read(&current->mm->mmap_sem);
1195 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1196 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1200 page = gfn_to_page(vcpu->kvm, gfn);
1201 up_read(&current->mm->mmap_sem);
1204 if (is_error_page(page)) {
1205 kvm_release_page_clean(page);
1206 up_read(&vcpu->kvm->slots_lock);
1210 spin_lock(&vcpu->kvm->mmu_lock);
1211 kvm_mmu_free_some_pages(vcpu);
1212 r = __direct_map(vcpu, v, write, largepage, gfn, page,
1214 spin_unlock(&vcpu->kvm->mmu_lock);
1216 up_read(&vcpu->kvm->slots_lock);
1222 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1223 struct kvm_mmu_page *sp)
1227 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1228 sp->spt[i] = shadow_trap_nonpresent_pte;
1231 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1234 struct kvm_mmu_page *sp;
1236 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1238 spin_lock(&vcpu->kvm->mmu_lock);
1239 #ifdef CONFIG_X86_64
1240 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1241 hpa_t root = vcpu->arch.mmu.root_hpa;
1243 sp = page_header(root);
1245 if (!sp->root_count && sp->role.invalid)
1246 kvm_mmu_zap_page(vcpu->kvm, sp);
1247 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1248 spin_unlock(&vcpu->kvm->mmu_lock);
1252 for (i = 0; i < 4; ++i) {
1253 hpa_t root = vcpu->arch.mmu.pae_root[i];
1256 root &= PT64_BASE_ADDR_MASK;
1257 sp = page_header(root);
1259 if (!sp->root_count && sp->role.invalid)
1260 kvm_mmu_zap_page(vcpu->kvm, sp);
1262 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1264 spin_unlock(&vcpu->kvm->mmu_lock);
1265 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1268 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1272 struct kvm_mmu_page *sp;
1273 int metaphysical = 0;
1275 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1277 #ifdef CONFIG_X86_64
1278 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1279 hpa_t root = vcpu->arch.mmu.root_hpa;
1281 ASSERT(!VALID_PAGE(root));
1284 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1285 PT64_ROOT_LEVEL, metaphysical,
1287 root = __pa(sp->spt);
1289 vcpu->arch.mmu.root_hpa = root;
1293 metaphysical = !is_paging(vcpu);
1296 for (i = 0; i < 4; ++i) {
1297 hpa_t root = vcpu->arch.mmu.pae_root[i];
1299 ASSERT(!VALID_PAGE(root));
1300 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1301 if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1302 vcpu->arch.mmu.pae_root[i] = 0;
1305 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1306 } else if (vcpu->arch.mmu.root_level == 0)
1308 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1309 PT32_ROOT_LEVEL, metaphysical,
1311 root = __pa(sp->spt);
1313 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1315 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1318 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1323 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1329 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
1330 r = mmu_topup_memory_caches(vcpu);
1335 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1337 gfn = gva >> PAGE_SHIFT;
1339 return nonpaging_map(vcpu, gva & PAGE_MASK,
1340 error_code & PFERR_WRITE_MASK, gfn);
1343 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1349 gfn_t gfn = gpa >> PAGE_SHIFT;
1352 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1354 r = mmu_topup_memory_caches(vcpu);
1358 down_read(&current->mm->mmap_sem);
1359 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1360 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1363 page = gfn_to_page(vcpu->kvm, gfn);
1364 if (is_error_page(page)) {
1365 kvm_release_page_clean(page);
1366 up_read(&current->mm->mmap_sem);
1369 spin_lock(&vcpu->kvm->mmu_lock);
1370 kvm_mmu_free_some_pages(vcpu);
1371 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
1372 largepage, gfn, page, TDP_ROOT_LEVEL);
1373 spin_unlock(&vcpu->kvm->mmu_lock);
1374 up_read(&current->mm->mmap_sem);
1379 static void nonpaging_free(struct kvm_vcpu *vcpu)
1381 mmu_free_roots(vcpu);
1384 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1386 struct kvm_mmu *context = &vcpu->arch.mmu;
1388 context->new_cr3 = nonpaging_new_cr3;
1389 context->page_fault = nonpaging_page_fault;
1390 context->gva_to_gpa = nonpaging_gva_to_gpa;
1391 context->free = nonpaging_free;
1392 context->prefetch_page = nonpaging_prefetch_page;
1393 context->root_level = 0;
1394 context->shadow_root_level = PT32E_ROOT_LEVEL;
1395 context->root_hpa = INVALID_PAGE;
1399 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1401 ++vcpu->stat.tlb_flush;
1402 kvm_x86_ops->tlb_flush(vcpu);
1405 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1407 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
1408 mmu_free_roots(vcpu);
1411 static void inject_page_fault(struct kvm_vcpu *vcpu,
1415 kvm_inject_page_fault(vcpu, addr, err_code);
1418 static void paging_free(struct kvm_vcpu *vcpu)
1420 nonpaging_free(vcpu);
1424 #include "paging_tmpl.h"
1428 #include "paging_tmpl.h"
1431 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1433 struct kvm_mmu *context = &vcpu->arch.mmu;
1435 ASSERT(is_pae(vcpu));
1436 context->new_cr3 = paging_new_cr3;
1437 context->page_fault = paging64_page_fault;
1438 context->gva_to_gpa = paging64_gva_to_gpa;
1439 context->prefetch_page = paging64_prefetch_page;
1440 context->free = paging_free;
1441 context->root_level = level;
1442 context->shadow_root_level = level;
1443 context->root_hpa = INVALID_PAGE;
1447 static int paging64_init_context(struct kvm_vcpu *vcpu)
1449 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1452 static int paging32_init_context(struct kvm_vcpu *vcpu)
1454 struct kvm_mmu *context = &vcpu->arch.mmu;
1456 context->new_cr3 = paging_new_cr3;
1457 context->page_fault = paging32_page_fault;
1458 context->gva_to_gpa = paging32_gva_to_gpa;
1459 context->free = paging_free;
1460 context->prefetch_page = paging32_prefetch_page;
1461 context->root_level = PT32_ROOT_LEVEL;
1462 context->shadow_root_level = PT32E_ROOT_LEVEL;
1463 context->root_hpa = INVALID_PAGE;
1467 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1469 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1472 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1474 struct kvm_mmu *context = &vcpu->arch.mmu;
1476 context->new_cr3 = nonpaging_new_cr3;
1477 context->page_fault = tdp_page_fault;
1478 context->free = nonpaging_free;
1479 context->prefetch_page = nonpaging_prefetch_page;
1480 context->shadow_root_level = TDP_ROOT_LEVEL;
1481 context->root_hpa = INVALID_PAGE;
1483 if (!is_paging(vcpu)) {
1484 context->gva_to_gpa = nonpaging_gva_to_gpa;
1485 context->root_level = 0;
1486 } else if (is_long_mode(vcpu)) {
1487 context->gva_to_gpa = paging64_gva_to_gpa;
1488 context->root_level = PT64_ROOT_LEVEL;
1489 } else if (is_pae(vcpu)) {
1490 context->gva_to_gpa = paging64_gva_to_gpa;
1491 context->root_level = PT32E_ROOT_LEVEL;
1493 context->gva_to_gpa = paging32_gva_to_gpa;
1494 context->root_level = PT32_ROOT_LEVEL;
1500 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1503 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1505 if (!is_paging(vcpu))
1506 return nonpaging_init_context(vcpu);
1507 else if (is_long_mode(vcpu))
1508 return paging64_init_context(vcpu);
1509 else if (is_pae(vcpu))
1510 return paging32E_init_context(vcpu);
1512 return paging32_init_context(vcpu);
1515 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1518 return init_kvm_tdp_mmu(vcpu);
1520 return init_kvm_softmmu(vcpu);
1523 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1526 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1527 vcpu->arch.mmu.free(vcpu);
1528 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1532 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1534 destroy_kvm_mmu(vcpu);
1535 return init_kvm_mmu(vcpu);
1537 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1539 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1543 r = mmu_topup_memory_caches(vcpu);
1546 spin_lock(&vcpu->kvm->mmu_lock);
1547 kvm_mmu_free_some_pages(vcpu);
1548 mmu_alloc_roots(vcpu);
1549 spin_unlock(&vcpu->kvm->mmu_lock);
1550 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
1551 kvm_mmu_flush_tlb(vcpu);
1555 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1557 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1559 mmu_free_roots(vcpu);
1562 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1563 struct kvm_mmu_page *sp,
1567 struct kvm_mmu_page *child;
1570 if (is_shadow_present_pte(pte)) {
1571 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1573 rmap_remove(vcpu->kvm, spte);
1575 child = page_header(pte & PT64_BASE_ADDR_MASK);
1576 mmu_page_remove_parent_pte(child, spte);
1579 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1580 if (is_large_pte(pte))
1581 --vcpu->kvm->stat.lpages;
1584 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1585 struct kvm_mmu_page *sp,
1589 if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
1590 && !vcpu->arch.update_pte.largepage) {
1591 ++vcpu->kvm->stat.mmu_pde_zapped;
1595 ++vcpu->kvm->stat.mmu_pte_updated;
1596 if (sp->role.glevels == PT32_ROOT_LEVEL)
1597 paging32_update_pte(vcpu, sp, spte, new);
1599 paging64_update_pte(vcpu, sp, spte, new);
1602 static bool need_remote_flush(u64 old, u64 new)
1604 if (!is_shadow_present_pte(old))
1606 if (!is_shadow_present_pte(new))
1608 if ((old ^ new) & PT64_BASE_ADDR_MASK)
1610 old ^= PT64_NX_MASK;
1611 new ^= PT64_NX_MASK;
1612 return (old & ~new & PT64_PERM_MASK) != 0;
1615 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1617 if (need_remote_flush(old, new))
1618 kvm_flush_remote_tlbs(vcpu->kvm);
1620 kvm_mmu_flush_tlb(vcpu);
1623 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1625 u64 *spte = vcpu->arch.last_pte_updated;
1627 return !!(spte && (*spte & PT_ACCESSED_MASK));
1630 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1631 const u8 *new, int bytes)
1638 vcpu->arch.update_pte.largepage = 0;
1640 if (bytes != 4 && bytes != 8)
1644 * Assume that the pte write is on a page table of the same type
1645 * as the current vcpu paging mode. This is nearly always true
1646 * (might be false while changing modes). Note it is verified later
1650 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1651 if ((bytes == 4) && (gpa % 4 == 0)) {
1652 r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1655 memcpy((void *)&gpte + (gpa % 8), new, 4);
1656 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1657 memcpy((void *)&gpte, new, 8);
1660 if ((bytes == 4) && (gpa % 4 == 0))
1661 memcpy((void *)&gpte, new, 4);
1663 if (!is_present_pte(gpte))
1665 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1667 down_read(&current->mm->mmap_sem);
1668 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1669 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1670 vcpu->arch.update_pte.largepage = 1;
1672 page = gfn_to_page(vcpu->kvm, gfn);
1673 up_read(&current->mm->mmap_sem);
1675 if (is_error_page(page)) {
1676 kvm_release_page_clean(page);
1679 vcpu->arch.update_pte.gfn = gfn;
1680 vcpu->arch.update_pte.page = page;
1683 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1684 const u8 *new, int bytes)
1686 gfn_t gfn = gpa >> PAGE_SHIFT;
1687 struct kvm_mmu_page *sp;
1688 struct hlist_node *node, *n;
1689 struct hlist_head *bucket;
1693 unsigned offset = offset_in_page(gpa);
1695 unsigned page_offset;
1696 unsigned misaligned;
1703 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
1704 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1705 spin_lock(&vcpu->kvm->mmu_lock);
1706 kvm_mmu_free_some_pages(vcpu);
1707 ++vcpu->kvm->stat.mmu_pte_write;
1708 kvm_mmu_audit(vcpu, "pre pte write");
1709 if (gfn == vcpu->arch.last_pt_write_gfn
1710 && !last_updated_pte_accessed(vcpu)) {
1711 ++vcpu->arch.last_pt_write_count;
1712 if (vcpu->arch.last_pt_write_count >= 3)
1715 vcpu->arch.last_pt_write_gfn = gfn;
1716 vcpu->arch.last_pt_write_count = 1;
1717 vcpu->arch.last_pte_updated = NULL;
1719 index = kvm_page_table_hashfn(gfn);
1720 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1721 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1722 if (sp->gfn != gfn || sp->role.metaphysical)
1724 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1725 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1726 misaligned |= bytes < 4;
1727 if (misaligned || flooded) {
1729 * Misaligned accesses are too much trouble to fix
1730 * up; also, they usually indicate a page is not used
1733 * If we're seeing too many writes to a page,
1734 * it may no longer be a page table, or we may be
1735 * forking, in which case it is better to unmap the
1738 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1739 gpa, bytes, sp->role.word);
1740 kvm_mmu_zap_page(vcpu->kvm, sp);
1741 ++vcpu->kvm->stat.mmu_flooded;
1744 page_offset = offset;
1745 level = sp->role.level;
1747 if (sp->role.glevels == PT32_ROOT_LEVEL) {
1748 page_offset <<= 1; /* 32->64 */
1750 * A 32-bit pde maps 4MB while the shadow pdes map
1751 * only 2MB. So we need to double the offset again
1752 * and zap two pdes instead of one.
1754 if (level == PT32_ROOT_LEVEL) {
1755 page_offset &= ~7; /* kill rounding error */
1759 quadrant = page_offset >> PAGE_SHIFT;
1760 page_offset &= ~PAGE_MASK;
1761 if (quadrant != sp->role.quadrant)
1764 spte = &sp->spt[page_offset / sizeof(*spte)];
1765 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1767 r = kvm_read_guest_atomic(vcpu->kvm,
1768 gpa & ~(u64)(pte_size - 1),
1770 new = (const void *)&gentry;
1776 mmu_pte_write_zap_pte(vcpu, sp, spte);
1778 mmu_pte_write_new_pte(vcpu, sp, spte, new);
1779 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1783 kvm_mmu_audit(vcpu, "post pte write");
1784 spin_unlock(&vcpu->kvm->mmu_lock);
1785 if (vcpu->arch.update_pte.page) {
1786 kvm_release_page_clean(vcpu->arch.update_pte.page);
1787 vcpu->arch.update_pte.page = NULL;
1791 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1796 down_read(&vcpu->kvm->slots_lock);
1797 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1798 up_read(&vcpu->kvm->slots_lock);
1800 spin_lock(&vcpu->kvm->mmu_lock);
1801 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1802 spin_unlock(&vcpu->kvm->mmu_lock);
1806 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1808 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
1809 struct kvm_mmu_page *sp;
1811 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
1812 struct kvm_mmu_page, link);
1813 kvm_mmu_zap_page(vcpu->kvm, sp);
1814 ++vcpu->kvm->stat.mmu_recycled;
1818 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1821 enum emulation_result er;
1823 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
1832 r = mmu_topup_memory_caches(vcpu);
1836 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1841 case EMULATE_DO_MMIO:
1842 ++vcpu->stat.mmio_exits;
1845 kvm_report_emulation_failure(vcpu, "pagetable");
1853 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1855 void kvm_enable_tdp(void)
1859 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1861 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1863 struct kvm_mmu_page *sp;
1865 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1866 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
1867 struct kvm_mmu_page, link);
1868 kvm_mmu_zap_page(vcpu->kvm, sp);
1870 free_page((unsigned long)vcpu->arch.mmu.pae_root);
1873 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1880 if (vcpu->kvm->arch.n_requested_mmu_pages)
1881 vcpu->kvm->arch.n_free_mmu_pages =
1882 vcpu->kvm->arch.n_requested_mmu_pages;
1884 vcpu->kvm->arch.n_free_mmu_pages =
1885 vcpu->kvm->arch.n_alloc_mmu_pages;
1887 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1888 * Therefore we need to allocate shadow page tables in the first
1889 * 4GB of memory, which happens to fit the DMA32 zone.
1891 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1894 vcpu->arch.mmu.pae_root = page_address(page);
1895 for (i = 0; i < 4; ++i)
1896 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1901 free_mmu_pages(vcpu);
1905 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1908 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1910 return alloc_mmu_pages(vcpu);
1913 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1916 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1918 return init_kvm_mmu(vcpu);
1921 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1925 destroy_kvm_mmu(vcpu);
1926 free_mmu_pages(vcpu);
1927 mmu_free_memory_caches(vcpu);
1930 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1932 struct kvm_mmu_page *sp;
1934 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
1938 if (!test_bit(slot, &sp->slot_bitmap))
1942 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1944 if (pt[i] & PT_WRITABLE_MASK)
1945 pt[i] &= ~PT_WRITABLE_MASK;
1949 void kvm_mmu_zap_all(struct kvm *kvm)
1951 struct kvm_mmu_page *sp, *node;
1953 spin_lock(&kvm->mmu_lock);
1954 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
1955 kvm_mmu_zap_page(kvm, sp);
1956 spin_unlock(&kvm->mmu_lock);
1958 kvm_flush_remote_tlbs(kvm);
1961 void kvm_mmu_module_exit(void)
1963 if (pte_chain_cache)
1964 kmem_cache_destroy(pte_chain_cache);
1965 if (rmap_desc_cache)
1966 kmem_cache_destroy(rmap_desc_cache);
1967 if (mmu_page_header_cache)
1968 kmem_cache_destroy(mmu_page_header_cache);
1971 int kvm_mmu_module_init(void)
1973 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1974 sizeof(struct kvm_pte_chain),
1976 if (!pte_chain_cache)
1978 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1979 sizeof(struct kvm_rmap_desc),
1981 if (!rmap_desc_cache)
1984 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1985 sizeof(struct kvm_mmu_page),
1987 if (!mmu_page_header_cache)
1993 kvm_mmu_module_exit();
1998 * Calculate mmu pages needed for kvm.
2000 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2003 unsigned int nr_mmu_pages;
2004 unsigned int nr_pages = 0;
2006 for (i = 0; i < kvm->nmemslots; i++)
2007 nr_pages += kvm->memslots[i].npages;
2009 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2010 nr_mmu_pages = max(nr_mmu_pages,
2011 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2013 return nr_mmu_pages;
2016 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2019 if (len > buffer->len)
2024 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2029 ret = pv_mmu_peek_buffer(buffer, len);
2034 buffer->processed += len;
2038 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2039 gpa_t addr, gpa_t value)
2044 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2047 r = mmu_topup_memory_caches(vcpu);
2051 if (!__emulator_write_phys(vcpu, addr, &value, bytes))
2057 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2059 kvm_x86_ops->tlb_flush(vcpu);
2063 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2065 spin_lock(&vcpu->kvm->mmu_lock);
2066 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2067 spin_unlock(&vcpu->kvm->mmu_lock);
2071 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2072 struct kvm_pv_mmu_op_buffer *buffer)
2074 struct kvm_mmu_op_header *header;
2076 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2079 switch (header->op) {
2080 case KVM_MMU_OP_WRITE_PTE: {
2081 struct kvm_mmu_op_write_pte *wpte;
2083 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2086 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2089 case KVM_MMU_OP_FLUSH_TLB: {
2090 struct kvm_mmu_op_flush_tlb *ftlb;
2092 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2095 return kvm_pv_mmu_flush_tlb(vcpu);
2097 case KVM_MMU_OP_RELEASE_PT: {
2098 struct kvm_mmu_op_release_pt *rpt;
2100 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2103 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2109 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2110 gpa_t addr, unsigned long *ret)
2113 struct kvm_pv_mmu_op_buffer buffer;
2115 down_read(&vcpu->kvm->slots_lock);
2116 down_read(&current->mm->mmap_sem);
2118 buffer.ptr = buffer.buf;
2119 buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2120 buffer.processed = 0;
2122 r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2126 while (buffer.len) {
2127 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2136 *ret = buffer.processed;
2137 up_read(&current->mm->mmap_sem);
2138 up_read(&vcpu->kvm->slots_lock);
2144 static const char *audit_msg;
2146 static gva_t canonicalize(gva_t gva)
2148 #ifdef CONFIG_X86_64
2149 gva = (long long)(gva << 16) >> 16;
2154 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2155 gva_t va, int level)
2157 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2159 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2161 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2164 if (ent == shadow_trap_nonpresent_pte)
2167 va = canonicalize(va);
2169 if (ent == shadow_notrap_nonpresent_pte)
2170 printk(KERN_ERR "audit: (%s) nontrapping pte"
2171 " in nonleaf level: levels %d gva %lx"
2172 " level %d pte %llx\n", audit_msg,
2173 vcpu->arch.mmu.root_level, va, level, ent);
2175 audit_mappings_page(vcpu, ent, va, level - 1);
2177 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
2178 struct page *page = gpa_to_page(vcpu, gpa);
2179 hpa_t hpa = page_to_phys(page);
2181 if (is_shadow_present_pte(ent)
2182 && (ent & PT64_BASE_ADDR_MASK) != hpa)
2183 printk(KERN_ERR "xx audit error: (%s) levels %d"
2184 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
2185 audit_msg, vcpu->arch.mmu.root_level,
2187 is_shadow_present_pte(ent));
2188 else if (ent == shadow_notrap_nonpresent_pte
2189 && !is_error_hpa(hpa))
2190 printk(KERN_ERR "audit: (%s) notrap shadow,"
2191 " valid guest gva %lx\n", audit_msg, va);
2192 kvm_release_page_clean(page);
2198 static void audit_mappings(struct kvm_vcpu *vcpu)
2202 if (vcpu->arch.mmu.root_level == 4)
2203 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
2205 for (i = 0; i < 4; ++i)
2206 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
2207 audit_mappings_page(vcpu,
2208 vcpu->arch.mmu.pae_root[i],
2213 static int count_rmaps(struct kvm_vcpu *vcpu)
2218 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2219 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2220 struct kvm_rmap_desc *d;
2222 for (j = 0; j < m->npages; ++j) {
2223 unsigned long *rmapp = &m->rmap[j];
2227 if (!(*rmapp & 1)) {
2231 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
2233 for (k = 0; k < RMAP_EXT; ++k)
2234 if (d->shadow_ptes[k])
2245 static int count_writable_mappings(struct kvm_vcpu *vcpu)
2248 struct kvm_mmu_page *sp;
2251 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2254 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
2257 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2260 if (!(ent & PT_PRESENT_MASK))
2262 if (!(ent & PT_WRITABLE_MASK))
2270 static void audit_rmap(struct kvm_vcpu *vcpu)
2272 int n_rmap = count_rmaps(vcpu);
2273 int n_actual = count_writable_mappings(vcpu);
2275 if (n_rmap != n_actual)
2276 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2277 __func__, audit_msg, n_rmap, n_actual);
2280 static void audit_write_protection(struct kvm_vcpu *vcpu)
2282 struct kvm_mmu_page *sp;
2283 struct kvm_memory_slot *slot;
2284 unsigned long *rmapp;
2287 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
2288 if (sp->role.metaphysical)
2291 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2292 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
2293 rmapp = &slot->rmap[gfn - slot->base_gfn];
2295 printk(KERN_ERR "%s: (%s) shadow page has writable"
2296 " mappings: gfn %lx role %x\n",
2297 __func__, audit_msg, sp->gfn,
2302 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2309 audit_write_protection(vcpu);
2310 audit_mappings(vcpu);