/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
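/*
 * For reference, mmu.c instantiates this template roughly as follows
 * (a sketch; see the include site in mmu.c for the authoritative version):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */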
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	/* shadow page tables are always in 64-bit format, hence PT64_INDEX */
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t pte;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};
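/*
 * Roughly: after a successful walk, table_gfn[level - 1] holds the gfn of
 * the guest page table visited at each level, pte is the final guest pte,
 * inherited_ar accumulates (ands) the user/writable bits seen along the
 * walk, and gfn is the frame the address maps to.  On failure, error_code
 * holds a hardware-style page fault error code ready for injection into
 * the guest.
 */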
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
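/*
 * Worked example: with PTTYPE == 32 and PSE, a 4MB pde of 0x00c001e7
 * masks down to 0x00c00000, so gpte_to_gfn_pde() returns gfn 0xc00; the
 * coarser directory mask is needed because the low bits of a large-page
 * pde hold attributes, not address bits.
 */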
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = table_gfn << PAGE_SHIFT;
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			/* set the accessed bit in the guest pte and write
			   it back, as the hardware walker would */
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
			kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		}
		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			/* PSE36 pdes supply physical address bits above bit 31 */
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
	}
	if (write_fault && !is_dirty_pte(pte)) {
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
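/*
 * The error code assembled above follows the x86 page fault error code
 * layout: PFERR_PRESENT_MASK (bit 0) marks a protection violation on a
 * present pte, PFERR_WRITE_MASK (bit 1) a write access, PFERR_USER_MASK
 * (bit 2) a user-mode access and PFERR_FETCH_MASK (bit 4) an instruction
 * fetch, so walker->error_code can be injected into the guest unchanged.
 */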
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);
	struct page *page;

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);
	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);

	/*
	 * paddr is masked even though it is not a pte, because the
	 * HPA_ERR_MASK bit might be used to signal an error.
	 */
	page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page_clean(page);
		return;
	}

	spte |= paddr;
	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	}
	else
		kvm_release_page_clean(page);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL, gpte_to_gfn(gpte));
}
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}
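/*
 * Note the asymmetry with FNAME(set_pte): the target address of a large
 * page cannot be taken from the pde's address bits directly, because
 * walk_addr() already folded the page table index (and, for PSE36, the
 * extra physical address bits) into walker->gfn; hence gaddr is rebuilt
 * from gfn here.
 */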
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = gpte_to_gfn(walker->pte);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}

	return shadow_ent;
}
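/*
 * A note on the walk above: hugepage_access packs the guest pde's
 * permissions into a small key (bit 0: writable, bit 1: user, bit 2: nx)
 * so that kvm_mmu_get_page() will not share a metaphysical shadow page
 * table between large-page mappings with different protections.
 */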
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0;
	pt_element_t *gpt;
	struct page *page;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
	page = gfn_to_page(vcpu->kvm, sp->gfn);
	gpt = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[offset + i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
	kvm_release_page_clean(page);
}
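/*
 * Quadrant note: with PTTYPE == 32 a guest page table holds 1024 32-bit
 * ptes while a shadow page holds only 512 sptes, so each shadow page
 * covers half of the guest page; sp->role.quadrant selects which half,
 * giving the starting offset into the guest table computed above.
 */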
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde