pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/kvm/mmu.c
blob 1965185bbe23fd31a23c883dcc4a5a702a6b2513
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "kvm.h"
22 #include "x86.h"
23
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29
30 #include <asm/page.h>
31 #include <asm/cmpxchg.h>
32 #include <asm/io.h>
33
34 #undef MMU_DEBUG
35
36 #undef AUDIT
37
38 #ifdef AUDIT
39 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
40 #else
41 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
42 #endif
43
44 #ifdef MMU_DEBUG
45
46 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
47 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
48
49 #else
50
51 #define pgprintk(x...) do { } while (0)
52 #define rmap_printk(x...) do { } while (0)
53
54 #endif
55
56 #if defined(MMU_DEBUG) || defined(AUDIT)
57 static int dbg = 1;
58 #endif
59
60 #ifndef MMU_DEBUG
61 #define ASSERT(x) do { } while (0)
62 #else
63 #define ASSERT(x)                                                       \
64         if (!(x)) {                                                     \
65                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
66                        __FILE__, __LINE__, #x);                         \
67         }
68 #endif
69
70 #define PT64_PT_BITS 9
71 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
72 #define PT32_PT_BITS 10
73 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
74
75 #define PT_WRITABLE_SHIFT 1
76
77 #define PT_PRESENT_MASK (1ULL << 0)
78 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
79 #define PT_USER_MASK (1ULL << 2)
80 #define PT_PWT_MASK (1ULL << 3)
81 #define PT_PCD_MASK (1ULL << 4)
82 #define PT_ACCESSED_MASK (1ULL << 5)
83 #define PT_DIRTY_MASK (1ULL << 6)
84 #define PT_PAGE_SIZE_MASK (1ULL << 7)
85 #define PT_PAT_MASK (1ULL << 7)
86 #define PT_GLOBAL_MASK (1ULL << 8)
87 #define PT64_NX_MASK (1ULL << 63)
88
89 #define PT_PAT_SHIFT 7
90 #define PT_DIR_PAT_SHIFT 12
91 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
92
93 #define PT32_DIR_PSE36_SIZE 4
94 #define PT32_DIR_PSE36_SHIFT 13
95 #define PT32_DIR_PSE36_MASK \
96         (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
97
98
99 #define PT_FIRST_AVAIL_BITS_SHIFT 9
100 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
101
102 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
103
104 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
105
106 #define PT64_LEVEL_BITS 9
107
108 #define PT64_LEVEL_SHIFT(level) \
109                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
110
111 #define PT64_LEVEL_MASK(level) \
112                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
113
114 #define PT64_INDEX(address, level)\
115         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
116
117
118 #define PT32_LEVEL_BITS 10
119
120 #define PT32_LEVEL_SHIFT(level) \
121                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
122
123 #define PT32_LEVEL_MASK(level) \
124                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
125
126 #define PT32_INDEX(address, level)\
127         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
128
129
130 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
131 #define PT64_DIR_BASE_ADDR_MASK \
132         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
133
134 #define PT32_BASE_ADDR_MASK PAGE_MASK
135 #define PT32_DIR_BASE_ADDR_MASK \
136         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
137
138 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
139                         | PT64_NX_MASK)
140
141 #define PFERR_PRESENT_MASK (1U << 0)
142 #define PFERR_WRITE_MASK (1U << 1)
143 #define PFERR_USER_MASK (1U << 2)
144 #define PFERR_FETCH_MASK (1U << 4)
145
146 #define PT64_ROOT_LEVEL 4
147 #define PT32_ROOT_LEVEL 2
148 #define PT32E_ROOT_LEVEL 3
149
150 #define PT_DIRECTORY_LEVEL 2
151 #define PT_PAGE_TABLE_LEVEL 1
152
153 #define RMAP_EXT 4
154
155 struct kvm_rmap_desc {
156         u64 *shadow_ptes[RMAP_EXT];
157         struct kvm_rmap_desc *more;
158 };
159
160 static struct kmem_cache *pte_chain_cache;
161 static struct kmem_cache *rmap_desc_cache;
162 static struct kmem_cache *mmu_page_header_cache;
163
164 static u64 __read_mostly shadow_trap_nonpresent_pte;
165 static u64 __read_mostly shadow_notrap_nonpresent_pte;
166
167 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
168 {
169         shadow_trap_nonpresent_pte = trap_pte;
170         shadow_notrap_nonpresent_pte = notrap_pte;
171 }
172 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
173
174 static int is_write_protection(struct kvm_vcpu *vcpu)
175 {
176         return vcpu->cr0 & X86_CR0_WP;
177 }
178
179 static int is_cpuid_PSE36(void)
180 {
181         return 1;
182 }
183
184 static int is_nx(struct kvm_vcpu *vcpu)
185 {
186         return vcpu->shadow_efer & EFER_NX;
187 }
188
189 static int is_present_pte(unsigned long pte)
190 {
191         return pte & PT_PRESENT_MASK;
192 }
193
194 static int is_shadow_present_pte(u64 pte)
195 {
196         pte &= ~PT_SHADOW_IO_MARK;
197         return pte != shadow_trap_nonpresent_pte
198                 && pte != shadow_notrap_nonpresent_pte;
199 }
200
201 static int is_writeble_pte(unsigned long pte)
202 {
203         return pte & PT_WRITABLE_MASK;
204 }
205
206 static int is_dirty_pte(unsigned long pte)
207 {
208         return pte & PT_DIRTY_MASK;
209 }
210
211 static int is_io_pte(unsigned long pte)
212 {
213         return pte & PT_SHADOW_IO_MARK;
214 }
215
216 static int is_rmap_pte(u64 pte)
217 {
218         return pte != shadow_trap_nonpresent_pte
219                 && pte != shadow_notrap_nonpresent_pte;
220 }
221
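/*
 * With the PSE36 extension, bits 13..16 of a 32-bit large-page pde hold
 * physical address bits 32..35.  pse36_gfn_delta() converts those bits
 * into the gfn offset they contribute (bits 20..23 of the gfn).
 */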
222 static gfn_t pse36_gfn_delta(u32 gpte)
223 {
224         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
225
226         return (gpte & PT32_DIR_PSE36_MASK) << shift;
227 }
228
229 static void set_shadow_pte(u64 *sptep, u64 spte)
230 {
231 #ifdef CONFIG_X86_64
232         set_64bit((unsigned long *)sptep, spte);
233 #else
234         set_64bit((unsigned long long *)sptep, spte);
235 #endif
236 }
237
238 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
239                                   struct kmem_cache *base_cache, int min)
240 {
241         void *obj;
242
243         if (cache->nobjs >= min)
244                 return 0;
245         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
246                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
247                 if (!obj)
248                         return -ENOMEM;
249                 cache->objects[cache->nobjs++] = obj;
250         }
251         return 0;
252 }
253
254 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
255 {
256         while (mc->nobjs)
257                 kfree(mc->objects[--mc->nobjs]);
258 }
259
260 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
261                                        int min)
262 {
263         struct page *page;
264
265         if (cache->nobjs >= min)
266                 return 0;
267         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
268                 page = alloc_page(GFP_KERNEL);
269                 if (!page)
270                         return -ENOMEM;
271                 set_page_private(page, 0);
272                 cache->objects[cache->nobjs++] = page_address(page);
273         }
274         return 0;
275 }
276
277 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
278 {
279         while (mc->nobjs)
280                 free_page((unsigned long)mc->objects[--mc->nobjs]);
281 }
282
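/*
 * Shadow MMU updates may need to allocate pte chains, rmap descriptors,
 * shadow pages and page headers.  Those objects are pre-allocated into
 * per-vcpu caches here, while failure is still easy to report; the
 * mmu_memory_cache_alloc() calls made later can then never fail in the
 * middle of an update.
 */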
283 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
284 {
285         int r;
286
287         kvm_mmu_free_some_pages(vcpu);
288         r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
289                                    pte_chain_cache, 4);
290         if (r)
291                 goto out;
292         r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
293                                    rmap_desc_cache, 1);
294         if (r)
295                 goto out;
296         r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
297         if (r)
298                 goto out;
299         r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
300                                    mmu_page_header_cache, 4);
301 out:
302         return r;
303 }
304
305 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
306 {
307         mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
308         mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
309         mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
310         mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
311 }
312
313 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
314                                     size_t size)
315 {
316         void *p;
317
318         BUG_ON(!mc->nobjs);
319         p = mc->objects[--mc->nobjs];
320         memset(p, 0, size);
321         return p;
322 }
323
324 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
325 {
326         return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
327                                       sizeof(struct kvm_pte_chain));
328 }
329
330 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
331 {
332         kfree(pc);
333 }
334
335 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
336 {
337         return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
338                                       sizeof(struct kvm_rmap_desc));
339 }
340
341 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
342 {
343         kfree(rd);
344 }
345
346 /*
347  * Take gfn and return the reverse mapping to it.
348  * Note: gfn must be unaliased before this function gets called
349  */
350
351 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
352 {
353         struct kvm_memory_slot *slot;
354
355         slot = gfn_to_memslot(kvm, gfn);
356         return &slot->rmap[gfn - slot->base_gfn];
357 }
358
359 /*
360  * Reverse mapping data structures:
361  *
362  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
363  * that points to page_address(page).
364  *
365  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
366  * containing more mappings.
367  */
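/*
 * Encodings of *rmapp as managed by rmap_add()/rmap_remove() below:
 *
 *   *rmapp == 0                          no shadow pte maps this gfn
 *   *rmapp == (unsigned long)spte        exactly one shadow pte (bit 0 clear)
 *   *rmapp == (unsigned long)desc | 1    chain of kvm_rmap_desc, each holding
 *                                        up to RMAP_EXT shadow ptes
 */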
368 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
369 {
370         struct kvm_mmu_page *page;
371         struct kvm_rmap_desc *desc;
372         unsigned long *rmapp;
373         int i;
374
375         if (!is_rmap_pte(*spte))
376                 return;
377         gfn = unalias_gfn(vcpu->kvm, gfn);
378         page = page_header(__pa(spte));
379         page->gfns[spte - page->spt] = gfn;
380         rmapp = gfn_to_rmap(vcpu->kvm, gfn);
381         if (!*rmapp) {
382                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
383                 *rmapp = (unsigned long)spte;
384         } else if (!(*rmapp & 1)) {
385                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
386                 desc = mmu_alloc_rmap_desc(vcpu);
387                 desc->shadow_ptes[0] = (u64 *)*rmapp;
388                 desc->shadow_ptes[1] = spte;
389                 *rmapp = (unsigned long)desc | 1;
390         } else {
391                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
392                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
393                 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
394                         desc = desc->more;
395                 if (desc->shadow_ptes[RMAP_EXT-1]) {
396                         desc->more = mmu_alloc_rmap_desc(vcpu);
397                         desc = desc->more;
398                 }
399                 for (i = 0; desc->shadow_ptes[i]; ++i)
400                         ;
401                 desc->shadow_ptes[i] = spte;
402         }
403 }
404
405 static void rmap_desc_remove_entry(unsigned long *rmapp,
406                                    struct kvm_rmap_desc *desc,
407                                    int i,
408                                    struct kvm_rmap_desc *prev_desc)
409 {
410         int j;
411
412         for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
413                 ;
414         desc->shadow_ptes[i] = desc->shadow_ptes[j];
415         desc->shadow_ptes[j] = NULL;
416         if (j != 0)
417                 return;
418         if (!prev_desc && !desc->more)
419                 *rmapp = (unsigned long)desc->shadow_ptes[0];
420         else
421                 if (prev_desc)
422                         prev_desc->more = desc->more;
423                 else
424                         *rmapp = (unsigned long)desc->more | 1;
425         mmu_free_rmap_desc(desc);
426 }
427
428 static void rmap_remove(struct kvm *kvm, u64 *spte)
429 {
430         struct kvm_rmap_desc *desc;
431         struct kvm_rmap_desc *prev_desc;
432         struct kvm_mmu_page *page;
433         struct page *release_page;
434         unsigned long *rmapp;
435         int i;
436
437         if (!is_rmap_pte(*spte))
438                 return;
439         page = page_header(__pa(spte));
440         release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
441         if (is_writeble_pte(*spte))
442                 kvm_release_page_dirty(release_page);
443         else
444                 kvm_release_page_clean(release_page);
445         rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
446         if (!*rmapp) {
447                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
448                 BUG();
449         } else if (!(*rmapp & 1)) {
450                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
451                 if ((u64 *)*rmapp != spte) {
452                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
453                                spte, *spte);
454                         BUG();
455                 }
456                 *rmapp = 0;
457         } else {
458                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
459                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
460                 prev_desc = NULL;
461                 while (desc) {
462                         for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
463                                 if (desc->shadow_ptes[i] == spte) {
464                                         rmap_desc_remove_entry(rmapp,
465                                                                desc, i,
466                                                                prev_desc);
467                                         return;
468                                 }
469                         prev_desc = desc;
470                         desc = desc->more;
471                 }
472                 BUG();
473         }
474 }
475
476 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
477 {
478         struct kvm_rmap_desc *desc;
479         struct kvm_rmap_desc *prev_desc;
480         u64 *prev_spte;
481         int i;
482
483         if (!*rmapp)
484                 return NULL;
485         else if (!(*rmapp & 1)) {
486                 if (!spte)
487                         return (u64 *)*rmapp;
488                 return NULL;
489         }
490         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
491         prev_desc = NULL;
492         prev_spte = NULL;
493         while (desc) {
494                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
495                         if (prev_spte == spte)
496                                 return desc->shadow_ptes[i];
497                         prev_spte = desc->shadow_ptes[i];
498                 }
499                 desc = desc->more;
500         }
501         return NULL;
502 }
503
504 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
505 {
506         unsigned long *rmapp;
507         u64 *spte;
508
509         gfn = unalias_gfn(kvm, gfn);
510         rmapp = gfn_to_rmap(kvm, gfn);
511
512         spte = rmap_next(kvm, rmapp, NULL);
513         while (spte) {
514                 BUG_ON(!spte);
515                 BUG_ON(!(*spte & PT_PRESENT_MASK));
516                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
517                 if (is_writeble_pte(*spte))
518                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
519                 kvm_flush_remote_tlbs(kvm);
520                 spte = rmap_next(kvm, rmapp, spte);
521         }
522 }
523
524 #ifdef MMU_DEBUG
525 static int is_empty_shadow_page(u64 *spt)
526 {
527         u64 *pos;
528         u64 *end;
529
530         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
531                 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
532                         printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
533                                pos, *pos);
534                         return 0;
535                 }
536         return 1;
537 }
538 #endif
539
540 static void kvm_mmu_free_page(struct kvm *kvm,
541                               struct kvm_mmu_page *page_head)
542 {
543         ASSERT(is_empty_shadow_page(page_head->spt));
544         list_del(&page_head->link);
545         __free_page(virt_to_page(page_head->spt));
546         __free_page(virt_to_page(page_head->gfns));
547         kfree(page_head);
548         ++kvm->n_free_mmu_pages;
549 }
550
551 static unsigned kvm_page_table_hashfn(gfn_t gfn)
552 {
553         return gfn;
554 }
555
556 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
557                                                u64 *parent_pte)
558 {
559         struct kvm_mmu_page *page;
560
561         if (!vcpu->kvm->n_free_mmu_pages)
562                 return NULL;
563
564         page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
565                                       sizeof *page);
566         page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
567         page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
568         set_page_private(virt_to_page(page->spt), (unsigned long)page);
569         list_add(&page->link, &vcpu->kvm->active_mmu_pages);
570         ASSERT(is_empty_shadow_page(page->spt));
571         page->slot_bitmap = 0;
572         page->multimapped = 0;
573         page->parent_pte = parent_pte;
574         --vcpu->kvm->n_free_mmu_pages;
575         return page;
576 }
577
578 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
579                                     struct kvm_mmu_page *page, u64 *parent_pte)
580 {
581         struct kvm_pte_chain *pte_chain;
582         struct hlist_node *node;
583         int i;
584
585         if (!parent_pte)
586                 return;
587         if (!page->multimapped) {
588                 u64 *old = page->parent_pte;
589
590                 if (!old) {
591                         page->parent_pte = parent_pte;
592                         return;
593                 }
594                 page->multimapped = 1;
595                 pte_chain = mmu_alloc_pte_chain(vcpu);
596                 INIT_HLIST_HEAD(&page->parent_ptes);
597                 hlist_add_head(&pte_chain->link, &page->parent_ptes);
598                 pte_chain->parent_ptes[0] = old;
599         }
600         hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
601                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
602                         continue;
603                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
604                         if (!pte_chain->parent_ptes[i]) {
605                                 pte_chain->parent_ptes[i] = parent_pte;
606                                 return;
607                         }
608         }
609         pte_chain = mmu_alloc_pte_chain(vcpu);
610         BUG_ON(!pte_chain);
611         hlist_add_head(&pte_chain->link, &page->parent_ptes);
612         pte_chain->parent_ptes[0] = parent_pte;
613 }
614
615 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
616                                        u64 *parent_pte)
617 {
618         struct kvm_pte_chain *pte_chain;
619         struct hlist_node *node;
620         int i;
621
622         if (!page->multimapped) {
623                 BUG_ON(page->parent_pte != parent_pte);
624                 page->parent_pte = NULL;
625                 return;
626         }
627         hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
628                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
629                         if (!pte_chain->parent_ptes[i])
630                                 break;
631                         if (pte_chain->parent_ptes[i] != parent_pte)
632                                 continue;
633                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
634                                 && pte_chain->parent_ptes[i + 1]) {
635                                 pte_chain->parent_ptes[i]
636                                         = pte_chain->parent_ptes[i + 1];
637                                 ++i;
638                         }
639                         pte_chain->parent_ptes[i] = NULL;
640                         if (i == 0) {
641                                 hlist_del(&pte_chain->link);
642                                 mmu_free_pte_chain(pte_chain);
643                                 if (hlist_empty(&page->parent_ptes)) {
644                                         page->multimapped = 0;
645                                         page->parent_pte = NULL;
646                                 }
647                         }
648                         return;
649                 }
650         BUG();
651 }
652
653 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
654                                                 gfn_t gfn)
655 {
656         unsigned index;
657         struct hlist_head *bucket;
658         struct kvm_mmu_page *page;
659         struct hlist_node *node;
660
661         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
662         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
663         bucket = &kvm->mmu_page_hash[index];
664         hlist_for_each_entry(page, node, bucket, hash_link)
665                 if (page->gfn == gfn && !page->role.metaphysical) {
666                         pgprintk("%s: found role %x\n",
667                                  __FUNCTION__, page->role.word);
668                         return page;
669                 }
670         return NULL;
671 }
672
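/*
 * Find the shadow page for (gfn, role), creating it if necessary.  A
 * 32-bit guest page table holds 1024 entries while a shadow page table
 * holds only 512, so a single guest table is backed by more than one
 * shadow page; role.quadrant records which slice of the guest page a
 * particular shadow page covers.
 */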
673 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
674                                              gfn_t gfn,
675                                              gva_t gaddr,
676                                              unsigned level,
677                                              int metaphysical,
678                                              unsigned hugepage_access,
679                                              u64 *parent_pte)
680 {
681         union kvm_mmu_page_role role;
682         unsigned index;
683         unsigned quadrant;
684         struct hlist_head *bucket;
685         struct kvm_mmu_page *page;
686         struct hlist_node *node;
687
688         role.word = 0;
689         role.glevels = vcpu->mmu.root_level;
690         role.level = level;
691         role.metaphysical = metaphysical;
692         role.hugepage_access = hugepage_access;
693         if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
694                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
695                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
696                 role.quadrant = quadrant;
697         }
698         pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
699                  gfn, role.word);
700         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
701         bucket = &vcpu->kvm->mmu_page_hash[index];
702         hlist_for_each_entry(page, node, bucket, hash_link)
703                 if (page->gfn == gfn && page->role.word == role.word) {
704                         mmu_page_add_parent_pte(vcpu, page, parent_pte);
705                         pgprintk("%s: found\n", __FUNCTION__);
706                         return page;
707                 }
708         page = kvm_mmu_alloc_page(vcpu, parent_pte);
709         if (!page)
710                 return page;
711         pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
712         page->gfn = gfn;
713         page->role = role;
714         hlist_add_head(&page->hash_link, bucket);
715         vcpu->mmu.prefetch_page(vcpu, page);
716         if (!metaphysical)
717                 rmap_write_protect(vcpu->kvm, gfn);
718         return page;
719 }
720
721 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
722                                          struct kvm_mmu_page *page)
723 {
724         unsigned i;
725         u64 *pt;
726         u64 ent;
727
728         pt = page->spt;
729
730         if (page->role.level == PT_PAGE_TABLE_LEVEL) {
731                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
732                         if (is_shadow_present_pte(pt[i]))
733                                 rmap_remove(kvm, &pt[i]);
734                         pt[i] = shadow_trap_nonpresent_pte;
735                 }
736                 kvm_flush_remote_tlbs(kvm);
737                 return;
738         }
739
740         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
741                 ent = pt[i];
742
743                 pt[i] = shadow_trap_nonpresent_pte;
744                 if (!is_shadow_present_pte(ent))
745                         continue;
746                 ent &= PT64_BASE_ADDR_MASK;
747                 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
748         }
749         kvm_flush_remote_tlbs(kvm);
750 }
751
752 static void kvm_mmu_put_page(struct kvm_mmu_page *page,
753                              u64 *parent_pte)
754 {
755         mmu_page_remove_parent_pte(page, parent_pte);
756 }
757
758 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
759 {
760         int i;
761
762         for (i = 0; i < KVM_MAX_VCPUS; ++i)
763                 if (kvm->vcpus[i])
764                         kvm->vcpus[i]->last_pte_updated = NULL;
765 }
766
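/*
 * Take a shadow page out of service: detach it from every parent pte,
 * unlink its children, and either free it or, while it is still in use
 * as a root, leave it on active_mmu_pages until its root count drops.
 */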
767 static void kvm_mmu_zap_page(struct kvm *kvm,
768                              struct kvm_mmu_page *page)
769 {
770         u64 *parent_pte;
771
772         ++kvm->stat.mmu_shadow_zapped;
773         while (page->multimapped || page->parent_pte) {
774                 if (!page->multimapped)
775                         parent_pte = page->parent_pte;
776                 else {
777                         struct kvm_pte_chain *chain;
778
779                         chain = container_of(page->parent_ptes.first,
780                                              struct kvm_pte_chain, link);
781                         parent_pte = chain->parent_ptes[0];
782                 }
783                 BUG_ON(!parent_pte);
784                 kvm_mmu_put_page(page, parent_pte);
785                 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
786         }
787         kvm_mmu_page_unlink_children(kvm, page);
788         if (!page->root_count) {
789                 hlist_del(&page->hash_link);
790                 kvm_mmu_free_page(kvm, page);
791         } else
792                 list_move(&page->link, &kvm->active_mmu_pages);
793         kvm_mmu_reset_last_pte_updated(kvm);
794 }
795
796 /*
797  * Changing the number of mmu pages allocated to the vm.
798  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
799  */
800 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
801 {
802         /*
803          * If we set the number of mmu pages to be smaller than the
804          * number of active pages, we must free some mmu pages before we
805          * change the value.
806          */
807
808         if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
809             kvm_nr_mmu_pages) {
810                 int n_used_mmu_pages = kvm->n_alloc_mmu_pages
811                                        - kvm->n_free_mmu_pages;
812
813                 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
814                         struct kvm_mmu_page *page;
815
816                         page = container_of(kvm->active_mmu_pages.prev,
817                                             struct kvm_mmu_page, link);
818                         kvm_mmu_zap_page(kvm, page);
819                         n_used_mmu_pages--;
820                 }
821                 kvm->n_free_mmu_pages = 0;
822         }
823         else
824                 kvm->n_free_mmu_pages += kvm_nr_mmu_pages
825                                          - kvm->n_alloc_mmu_pages;
826
827         kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
828 }
829
830 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
831 {
832         unsigned index;
833         struct hlist_head *bucket;
834         struct kvm_mmu_page *page;
835         struct hlist_node *node, *n;
836         int r;
837
838         pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
839         r = 0;
840         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
841         bucket = &kvm->mmu_page_hash[index];
842         hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
843                 if (page->gfn == gfn && !page->role.metaphysical) {
844                         pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
845                                  page->role.word);
846                         kvm_mmu_zap_page(kvm, page);
847                         r = 1;
848                 }
849         return r;
850 }
851
852 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
853 {
854         struct kvm_mmu_page *page;
855
856         while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
857                 pgprintk("%s: zap %lx %x\n",
858                          __FUNCTION__, gfn, page->role.word);
859                 kvm_mmu_zap_page(kvm, page);
860         }
861 }
862
863 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
864 {
865         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
866         struct kvm_mmu_page *page_head = page_header(__pa(pte));
867
868         __set_bit(slot, &page_head->slot_bitmap);
869 }
870
871 hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
872 {
873         struct page *page;
874         hpa_t hpa;
875
876         ASSERT((gpa & HPA_ERR_MASK) == 0);
877         page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
878         hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
879         if (is_error_page(page))
880                 return hpa | HPA_ERR_MASK;
881         return hpa;
882 }
883
884 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
885 {
886         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
887
888         if (gpa == UNMAPPED_GVA)
889                 return UNMAPPED_GVA;
890         return gpa_to_hpa(vcpu->kvm, gpa);
891 }
892
893 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
894 {
895         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
896
897         if (gpa == UNMAPPED_GVA)
898                 return NULL;
899         return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
900 }
901
902 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
903 {
904 }
905
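/*
 * Walk the shadow page table for address 'v', allocating intermediate
 * shadow pages as needed, and install a writable leaf pte for 'page'.
 * Used when the guest runs without paging, so the address is both gva
 * and gpa.
 */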
906 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
907 {
908         int level = PT32E_ROOT_LEVEL;
909         hpa_t table_addr = vcpu->mmu.root_hpa;
910
911         for (; ; level--) {
912                 u32 index = PT64_INDEX(v, level);
913                 u64 *table;
914                 u64 pte;
915
916                 ASSERT(VALID_PAGE(table_addr));
917                 table = __va(table_addr);
918
919                 if (level == 1) {
920                         int was_rmapped;
921
922                         pte = table[index];
923                         was_rmapped = is_rmap_pte(pte);
924                         if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
925                                 kvm_release_page_clean(page);
926                                 return 0;
927                         }
928                         mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
929                         page_header_update_slot(vcpu->kvm, table,
930                                                 v >> PAGE_SHIFT);
931                         table[index] = page_to_phys(page)
932                                 | PT_PRESENT_MASK | PT_WRITABLE_MASK
933                                 | PT_USER_MASK;
934                         if (!was_rmapped)
935                                 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
936                         else
937                                 kvm_release_page_clean(page);
938
939                         return 0;
940                 }
941
942                 if (table[index] == shadow_trap_nonpresent_pte) {
943                         struct kvm_mmu_page *new_table;
944                         gfn_t pseudo_gfn;
945
946                         pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
947                                 >> PAGE_SHIFT;
948                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
949                                                      v, level - 1,
950                                                      1, 3, &table[index]);
951                         if (!new_table) {
952                                 pgprintk("nonpaging_map: ENOMEM\n");
953                                 kvm_release_page_clean(page);
954                                 return -ENOMEM;
955                         }
956
957                         table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
958                                 | PT_WRITABLE_MASK | PT_USER_MASK;
959                 }
960                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
961         }
962 }
963
964 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
965                                     struct kvm_mmu_page *sp)
966 {
967         int i;
968
969         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
970                 sp->spt[i] = shadow_trap_nonpresent_pte;
971 }
972
973 static void mmu_free_roots(struct kvm_vcpu *vcpu)
974 {
975         int i;
976         struct kvm_mmu_page *page;
977
978         if (!VALID_PAGE(vcpu->mmu.root_hpa))
979                 return;
980 #ifdef CONFIG_X86_64
981         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
982                 hpa_t root = vcpu->mmu.root_hpa;
983
984                 page = page_header(root);
985                 --page->root_count;
986                 vcpu->mmu.root_hpa = INVALID_PAGE;
987                 return;
988         }
989 #endif
990         for (i = 0; i < 4; ++i) {
991                 hpa_t root = vcpu->mmu.pae_root[i];
992
993                 if (root) {
994                         root &= PT64_BASE_ADDR_MASK;
995                         page = page_header(root);
996                         --page->root_count;
997                 }
998                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
999         }
1000         vcpu->mmu.root_hpa = INVALID_PAGE;
1001 }
1002
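/*
 * Allocate the shadow root(s) for the current mode: a single 4-level
 * root in long mode, otherwise four PAE-format roots, each covering one
 * 1GB quarter of the guest address space (for PAE guests, one per pdptr).
 */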
1003 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1004 {
1005         int i;
1006         gfn_t root_gfn;
1007         struct kvm_mmu_page *page;
1008
1009         root_gfn = vcpu->cr3 >> PAGE_SHIFT;
1010
1011 #ifdef CONFIG_X86_64
1012         if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1013                 hpa_t root = vcpu->mmu.root_hpa;
1014
1015                 ASSERT(!VALID_PAGE(root));
1016                 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
1017                                         PT64_ROOT_LEVEL, 0, 0, NULL);
1018                 root = __pa(page->spt);
1019                 ++page->root_count;
1020                 vcpu->mmu.root_hpa = root;
1021                 return;
1022         }
1023 #endif
1024         for (i = 0; i < 4; ++i) {
1025                 hpa_t root = vcpu->mmu.pae_root[i];
1026
1027                 ASSERT(!VALID_PAGE(root));
1028                 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
1029                         if (!is_present_pte(vcpu->pdptrs[i])) {
1030                                 vcpu->mmu.pae_root[i] = 0;
1031                                 continue;
1032                         }
1033                         root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
1034                 } else if (vcpu->mmu.root_level == 0)
1035                         root_gfn = 0;
1036                 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1037                                         PT32_ROOT_LEVEL, !is_paging(vcpu),
1038                                         0, NULL);
1039                 root = __pa(page->spt);
1040                 ++page->root_count;
1041                 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
1042         }
1043         vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
1044 }
1045
1046 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1047 {
1048         return vaddr;
1049 }
1050
1051 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1052                                 u32 error_code)
1053 {
1054         struct page *page;
1055         int r;
1056
1057         r = mmu_topup_memory_caches(vcpu);
1058         if (r)
1059                 return r;
1060
1061         ASSERT(vcpu);
1062         ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
1063
1064         page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);
1065
1066         if (is_error_page(page)) {
1067                 kvm_release_page_clean(page);
1068                 return 1;
1069         }
1070
1071         return nonpaging_map(vcpu, gva & PAGE_MASK, page);
1072 }
1073
1074 static void nonpaging_free(struct kvm_vcpu *vcpu)
1075 {
1076         mmu_free_roots(vcpu);
1077 }
1078
1079 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1080 {
1081         struct kvm_mmu *context = &vcpu->mmu;
1082
1083         context->new_cr3 = nonpaging_new_cr3;
1084         context->page_fault = nonpaging_page_fault;
1085         context->gva_to_gpa = nonpaging_gva_to_gpa;
1086         context->free = nonpaging_free;
1087         context->prefetch_page = nonpaging_prefetch_page;
1088         context->root_level = 0;
1089         context->shadow_root_level = PT32E_ROOT_LEVEL;
1090         context->root_hpa = INVALID_PAGE;
1091         return 0;
1092 }
1093
1094 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1095 {
1096         ++vcpu->stat.tlb_flush;
1097         kvm_x86_ops->tlb_flush(vcpu);
1098 }
1099
1100 static void paging_new_cr3(struct kvm_vcpu *vcpu)
1101 {
1102         pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
1103         mmu_free_roots(vcpu);
1104 }
1105
1106 static void inject_page_fault(struct kvm_vcpu *vcpu,
1107                               u64 addr,
1108                               u32 err_code)
1109 {
1110         kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
1111 }
1112
1113 static void paging_free(struct kvm_vcpu *vcpu)
1114 {
1115         nonpaging_free(vcpu);
1116 }
1117
1118 #define PTTYPE 64
1119 #include "paging_tmpl.h"
1120 #undef PTTYPE
1121
1122 #define PTTYPE 32
1123 #include "paging_tmpl.h"
1124 #undef PTTYPE
1125
1126 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1127 {
1128         struct kvm_mmu *context = &vcpu->mmu;
1129
1130         ASSERT(is_pae(vcpu));
1131         context->new_cr3 = paging_new_cr3;
1132         context->page_fault = paging64_page_fault;
1133         context->gva_to_gpa = paging64_gva_to_gpa;
1134         context->prefetch_page = paging64_prefetch_page;
1135         context->free = paging_free;
1136         context->root_level = level;
1137         context->shadow_root_level = level;
1138         context->root_hpa = INVALID_PAGE;
1139         return 0;
1140 }
1141
1142 static int paging64_init_context(struct kvm_vcpu *vcpu)
1143 {
1144         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1145 }
1146
1147 static int paging32_init_context(struct kvm_vcpu *vcpu)
1148 {
1149         struct kvm_mmu *context = &vcpu->mmu;
1150
1151         context->new_cr3 = paging_new_cr3;
1152         context->page_fault = paging32_page_fault;
1153         context->gva_to_gpa = paging32_gva_to_gpa;
1154         context->free = paging_free;
1155         context->prefetch_page = paging32_prefetch_page;
1156         context->root_level = PT32_ROOT_LEVEL;
1157         context->shadow_root_level = PT32E_ROOT_LEVEL;
1158         context->root_hpa = INVALID_PAGE;
1159         return 0;
1160 }
1161
1162 static int paging32E_init_context(struct kvm_vcpu *vcpu)
1163 {
1164         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1165 }
1166
1167 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1168 {
1169         ASSERT(vcpu);
1170         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1171
1172         if (!is_paging(vcpu))
1173                 return nonpaging_init_context(vcpu);
1174         else if (is_long_mode(vcpu))
1175                 return paging64_init_context(vcpu);
1176         else if (is_pae(vcpu))
1177                 return paging32E_init_context(vcpu);
1178         else
1179                 return paging32_init_context(vcpu);
1180 }
1181
1182 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1183 {
1184         ASSERT(vcpu);
1185         if (VALID_PAGE(vcpu->mmu.root_hpa)) {
1186                 vcpu->mmu.free(vcpu);
1187                 vcpu->mmu.root_hpa = INVALID_PAGE;
1188         }
1189 }
1190
1191 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1192 {
1193         destroy_kvm_mmu(vcpu);
1194         return init_kvm_mmu(vcpu);
1195 }
1196 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1197
1198 int kvm_mmu_load(struct kvm_vcpu *vcpu)
1199 {
1200         int r;
1201
1202         mutex_lock(&vcpu->kvm->lock);
1203         r = mmu_topup_memory_caches(vcpu);
1204         if (r)
1205                 goto out;
1206         mmu_alloc_roots(vcpu);
1207         kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1208         kvm_mmu_flush_tlb(vcpu);
1209 out:
1210         mutex_unlock(&vcpu->kvm->lock);
1211         return r;
1212 }
1213 EXPORT_SYMBOL_GPL(kvm_mmu_load);
1214
1215 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1216 {
1217         mmu_free_roots(vcpu);
1218 }
1219
1220 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1221                                   struct kvm_mmu_page *page,
1222                                   u64 *spte)
1223 {
1224         u64 pte;
1225         struct kvm_mmu_page *child;
1226
1227         pte = *spte;
1228         if (is_shadow_present_pte(pte)) {
1229                 if (page->role.level == PT_PAGE_TABLE_LEVEL)
1230                         rmap_remove(vcpu->kvm, spte);
1231                 else {
1232                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1233                         mmu_page_remove_parent_pte(child, spte);
1234                 }
1235         }
1236         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
1237 }
1238
1239 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1240                                   struct kvm_mmu_page *page,
1241                                   u64 *spte,
1242                                   const void *new, int bytes,
1243                                   int offset_in_pte)
1244 {
1245         if (page->role.level != PT_PAGE_TABLE_LEVEL) {
1246                 ++vcpu->kvm->stat.mmu_pde_zapped;
1247                 return;
1248         }
1249
1250         ++vcpu->kvm->stat.mmu_pte_updated;
1251         if (page->role.glevels == PT32_ROOT_LEVEL)
1252                 paging32_update_pte(vcpu, page, spte, new, bytes,
1253                                     offset_in_pte);
1254         else
1255                 paging64_update_pte(vcpu, page, spte, new, bytes,
1256                                     offset_in_pte);
1257 }
1258
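/*
 * Other vcpus only need their TLBs flushed when the old spte could still
 * be cached with rights the new spte no longer grants: a mapping that
 * went away, moved to a different frame, or lost a permission bit.
 */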
1259 static bool need_remote_flush(u64 old, u64 new)
1260 {
1261         if (!is_shadow_present_pte(old))
1262                 return false;
1263         if (!is_shadow_present_pte(new))
1264                 return true;
1265         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1266                 return true;
1267         old ^= PT64_NX_MASK;
1268         new ^= PT64_NX_MASK;
1269         return (old & ~new & PT64_PERM_MASK) != 0;
1270 }
1271
1272 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1273 {
1274         if (need_remote_flush(old, new))
1275                 kvm_flush_remote_tlbs(vcpu->kvm);
1276         else
1277                 kvm_mmu_flush_tlb(vcpu);
1278 }
1279
1280 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1281 {
1282         u64 *spte = vcpu->last_pte_updated;
1283
1284         return !!(spte && (*spte & PT_ACCESSED_MASK));
1285 }
1286
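/*
 * Called when the guest writes to a gpa that is shadowed as a page
 * table, so the corresponding shadow ptes can be updated.  Writes that
 * are misaligned, or that keep hitting the same gfn, indicate the page
 * is probably not a page table any more, and the shadow page is zapped
 * instead of updated.
 */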
1287 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1288                        const u8 *new, int bytes)
1289 {
1290         gfn_t gfn = gpa >> PAGE_SHIFT;
1291         struct kvm_mmu_page *page;
1292         struct hlist_node *node, *n;
1293         struct hlist_head *bucket;
1294         unsigned index;
1295         u64 entry;
1296         u64 *spte;
1297         unsigned offset = offset_in_page(gpa);
1298         unsigned pte_size;
1299         unsigned page_offset;
1300         unsigned misaligned;
1301         unsigned quadrant;
1302         int level;
1303         int flooded = 0;
1304         int npte;
1305
1306         pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1307         ++vcpu->kvm->stat.mmu_pte_write;
1308         kvm_mmu_audit(vcpu, "pre pte write");
1309         if (gfn == vcpu->last_pt_write_gfn
1310             && !last_updated_pte_accessed(vcpu)) {
1311                 ++vcpu->last_pt_write_count;
1312                 if (vcpu->last_pt_write_count >= 3)
1313                         flooded = 1;
1314         } else {
1315                 vcpu->last_pt_write_gfn = gfn;
1316                 vcpu->last_pt_write_count = 1;
1317                 vcpu->last_pte_updated = NULL;
1318         }
1319         index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1320         bucket = &vcpu->kvm->mmu_page_hash[index];
1321         hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
1322                 if (page->gfn != gfn || page->role.metaphysical)
1323                         continue;
1324                 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1325                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1326                 misaligned |= bytes < 4;
1327                 if (misaligned || flooded) {
1328                         /*
1329                          * Misaligned accesses are too much trouble to fix
1330                          * up; also, they usually indicate a page is not used
1331                          * as a page table.
1332                          *
1333                          * If we're seeing too many writes to a page,
1334                          * it may no longer be a page table, or we may be
1335                          * forking, in which case it is better to unmap the
1336                          * page.
1337                          */
1338                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1339                                  gpa, bytes, page->role.word);
1340                         kvm_mmu_zap_page(vcpu->kvm, page);
1341                         ++vcpu->kvm->stat.mmu_flooded;
1342                         continue;
1343                 }
1344                 page_offset = offset;
1345                 level = page->role.level;
1346                 npte = 1;
1347                 if (page->role.glevels == PT32_ROOT_LEVEL) {
1348                         page_offset <<= 1;      /* 32->64 */
1349                         /*
1350                          * A 32-bit pde maps 4MB while the shadow pdes map
1351                          * only 2MB.  So we need to double the offset again
1352                          * and zap two pdes instead of one.
1353                          */
1354                         if (level == PT32_ROOT_LEVEL) {
1355                                 page_offset &= ~7; /* kill rounding error */
1356                                 page_offset <<= 1;
1357                                 npte = 2;
1358                         }
1359                         quadrant = page_offset >> PAGE_SHIFT;
1360                         page_offset &= ~PAGE_MASK;
1361                         if (quadrant != page->role.quadrant)
1362                                 continue;
1363                 }
1364                 spte = &page->spt[page_offset / sizeof(*spte)];
1365                 while (npte--) {
1366                         entry = *spte;
1367                         mmu_pte_write_zap_pte(vcpu, page, spte);
1368                         mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
1369                                               page_offset & (pte_size - 1));
1370                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1371                         ++spte;
1372                 }
1373         }
1374         kvm_mmu_audit(vcpu, "post pte write");
1375 }
1376
1377 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1378 {
1379         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1380
1381         return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1382 }
1383
1384 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1385 {
1386         while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
1387                 struct kvm_mmu_page *page;
1388
1389                 page = container_of(vcpu->kvm->active_mmu_pages.prev,
1390                                     struct kvm_mmu_page, link);
1391                 kvm_mmu_zap_page(vcpu->kvm, page);
1392                 ++vcpu->kvm->stat.mmu_recycled;
1393         }
1394 }
1395
1396 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1397 {
1398         int r;
1399         enum emulation_result er;
1400
1401         mutex_lock(&vcpu->kvm->lock);
1402         r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
1403         if (r < 0)
1404                 goto out;
1405
1406         if (!r) {
1407                 r = 1;
1408                 goto out;
1409         }
1410
1411         r = mmu_topup_memory_caches(vcpu);
1412         if (r)
1413                 goto out;
1414
1415         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
1416         mutex_unlock(&vcpu->kvm->lock);
1417
1418         switch (er) {
1419         case EMULATE_DONE:
1420                 return 1;
1421         case EMULATE_DO_MMIO:
1422                 ++vcpu->stat.mmio_exits;
1423                 return 0;
1424         case EMULATE_FAIL:
1425                 kvm_report_emulation_failure(vcpu, "pagetable");
1426                 return 1;
1427         default:
1428                 BUG();
1429         }
1430 out:
1431         mutex_unlock(&vcpu->kvm->lock);
1432         return r;
1433 }
1434 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1435
1436 static void free_mmu_pages(struct kvm_vcpu *vcpu)
1437 {
1438         struct kvm_mmu_page *page;
1439
1440         while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1441                 page = container_of(vcpu->kvm->active_mmu_pages.next,
1442                                     struct kvm_mmu_page, link);
1443                 kvm_mmu_zap_page(vcpu->kvm, page);
1444         }
1445         free_page((unsigned long)vcpu->mmu.pae_root);
1446 }
1447
1448 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1449 {
1450         struct page *page;
1451         int i;
1452
1453         ASSERT(vcpu);
1454
1455         if (vcpu->kvm->n_requested_mmu_pages)
1456                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
1457         else
1458                 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
1459         /*
1460          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1461          * Therefore we need to allocate shadow page tables in the first
1462          * 4GB of memory, which happens to fit the DMA32 zone.
1463          */
1464         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1465         if (!page)
1466                 goto error_1;
1467         vcpu->mmu.pae_root = page_address(page);
1468         for (i = 0; i < 4; ++i)
1469                 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1470
1471         return 0;
1472
1473 error_1:
1474         free_mmu_pages(vcpu);
1475         return -ENOMEM;
1476 }
1477
1478 int kvm_mmu_create(struct kvm_vcpu *vcpu)
1479 {
1480         ASSERT(vcpu);
1481         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1482
1483         return alloc_mmu_pages(vcpu);
1484 }
1485
1486 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1487 {
1488         ASSERT(vcpu);
1489         ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1490
1491         return init_kvm_mmu(vcpu);
1492 }
1493
1494 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1495 {
1496         ASSERT(vcpu);
1497
1498         destroy_kvm_mmu(vcpu);
1499         free_mmu_pages(vcpu);
1500         mmu_free_memory_caches(vcpu);
1501 }
1502
1503 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1504 {
1505         struct kvm_mmu_page *page;
1506
1507         list_for_each_entry(page, &kvm->active_mmu_pages, link) {
1508                 int i;
1509                 u64 *pt;
1510
1511                 if (!test_bit(slot, &page->slot_bitmap))
1512                         continue;
1513
1514                 pt = page->spt;
1515                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1516                         /* avoid RMW */
1517                         if (pt[i] & PT_WRITABLE_MASK)
1518                                 pt[i] &= ~PT_WRITABLE_MASK;
1519         }
1520 }
1521
1522 void kvm_mmu_zap_all(struct kvm *kvm)
1523 {
1524         struct kvm_mmu_page *page, *node;
1525
1526         list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
1527                 kvm_mmu_zap_page(kvm, page);
1528
1529         kvm_flush_remote_tlbs(kvm);
1530 }
1531
1532 void kvm_mmu_module_exit(void)
1533 {
1534         if (pte_chain_cache)
1535                 kmem_cache_destroy(pte_chain_cache);
1536         if (rmap_desc_cache)
1537                 kmem_cache_destroy(rmap_desc_cache);
1538         if (mmu_page_header_cache)
1539                 kmem_cache_destroy(mmu_page_header_cache);
1540 }
1541
1542 int kvm_mmu_module_init(void)
1543 {
1544         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1545                                             sizeof(struct kvm_pte_chain),
1546                                             0, 0, NULL);
1547         if (!pte_chain_cache)
1548                 goto nomem;
1549         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1550                                             sizeof(struct kvm_rmap_desc),
1551                                             0, 0, NULL);
1552         if (!rmap_desc_cache)
1553                 goto nomem;
1554
1555         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1556                                                   sizeof(struct kvm_mmu_page),
1557                                                   0, 0, NULL);
1558         if (!mmu_page_header_cache)
1559                 goto nomem;
1560
1561         return 0;
1562
1563 nomem:
1564         kvm_mmu_module_exit();
1565         return -ENOMEM;
1566 }
1567
1568 /*
1569  * Calculate mmu pages needed for kvm.
1570  */
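/*
 * Example, assuming KVM_PERMILLE_MMU_PAGES is 20: a guest with 1GB of
 * memory (262144 pages) is given 262144 * 20 / 1000 = 5242 shadow
 * pages, subject to the KVM_MIN_ALLOC_MMU_PAGES floor.
 */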
1571 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1572 {
1573         int i;
1574         unsigned int nr_mmu_pages;
1575         unsigned int  nr_pages = 0;
1576
1577         for (i = 0; i < kvm->nmemslots; i++)
1578                 nr_pages += kvm->memslots[i].npages;
1579
1580         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1581         nr_mmu_pages = max(nr_mmu_pages,
1582                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1583
1584         return nr_mmu_pages;
1585 }
1586
1587 #ifdef AUDIT
1588
1589 static const char *audit_msg;
1590
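/*
 * Sign-extend the address from bit 47 so it compares equal to the canonical
 * form generated by the hardware on x86_64.
 */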
1591 static gva_t canonicalize(gva_t gva)
1592 {
1593 #ifdef CONFIG_X86_64
1594         gva = (long long)(gva << 16) >> 16;
1595 #endif
1596         return gva;
1597 }
1598
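/*
 * Walk one shadow page table at @level, recursing into lower levels.  At the
 * leaf level, verify that each present shadow pte points at the host page
 * that the guest translation (gva_to_gpa/gpa_to_hpa) yields for @va, and warn
 * about "notrap" nonpresent ptes that shadow a valid guest mapping.
 */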
1599 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1600                                 gva_t va, int level)
1601 {
1602         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1603         int i;
1604         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1605
1606         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1607                 u64 ent = pt[i];
1608
1609                 if (ent == shadow_trap_nonpresent_pte)
1610                         continue;
1611
1612                 va = canonicalize(va);
1613                 if (level > 1) {
1614                         if (ent == shadow_notrap_nonpresent_pte)
1615                                 printk(KERN_ERR "audit: (%s) nontrapping pte"
1616                                        " in nonleaf level: levels %d gva %lx"
1617                                        " level %d pte %llx\n", audit_msg,
1618                                        vcpu->mmu.root_level, va, level, ent);
1619
1620                         audit_mappings_page(vcpu, ent, va, level - 1);
1621                 } else {
1622                         gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1623                         hpa_t hpa = gpa_to_hpa(vcpu, gpa);
1624                         struct page *page;
1625
1626                         if (is_shadow_present_pte(ent)
1627                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
1628                                 printk(KERN_ERR "xx audit error: (%s) levels %d"
1629                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
1630                                        audit_msg, vcpu->mmu.root_level,
1631                                        va, gpa, hpa, ent,
1632                                        is_shadow_present_pte(ent));
1633                         else if (ent == shadow_notrap_nonpresent_pte
1634                                  && !is_error_hpa(hpa))
1635                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
1636                                        " valid guest gva %lx\n", audit_msg, va);
1637                         page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
1638                                            >> PAGE_SHIFT);
1639                         kvm_release_page_clean(page);
1640
1641                 }
1642         }
1643 }
1644
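/*
 * Audit the whole shadow tree: start at the single root for 4-level paging,
 * otherwise at each present PAE root (each covering 1GB of guest address
 * space, hence the i << 30).
 */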
1645 static void audit_mappings(struct kvm_vcpu *vcpu)
1646 {
1647         unsigned i;
1648
1649         if (vcpu->mmu.root_level == 4)
1650                 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1651         else
1652                 for (i = 0; i < 4; ++i)
1653                         if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1654                                 audit_mappings_page(vcpu,
1655                                                     vcpu->mmu.pae_root[i],
1656                                                     i << 30,
1657                                                     2);
1658 }
1659
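/*
 * Count the shadow ptes reachable through the rmap of every memory slot.  An
 * rmap entry with the low bit clear is a single shadow pte; with the low bit
 * set it points to a chain of kvm_rmap_desc structures.
 */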
1660 static int count_rmaps(struct kvm_vcpu *vcpu)
1661 {
1662         int nmaps = 0;
1663         int i, j, k;
1664
1665         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1666                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1667                 struct kvm_rmap_desc *d;
1668
1669                 for (j = 0; j < m->npages; ++j) {
1670                         unsigned long *rmapp = &m->rmap[j];
1671
1672                         if (!*rmapp)
1673                                 continue;
1674                         if (!(*rmapp & 1)) {
1675                                 ++nmaps;
1676                                 continue;
1677                         }
1678                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
1679                         while (d) {
1680                                 for (k = 0; k < RMAP_EXT; ++k)
1681                                         if (d->shadow_ptes[k])
1682                                                 ++nmaps;
1683                                         else
1684                                                 break;
1685                                 d = d->more;
1686                         }
1687                 }
1688         }
1689         return nmaps;
1690 }
1691
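/* Count the present, writable ptes in all last-level shadow page tables. */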
1692 static int count_writable_mappings(struct kvm_vcpu *vcpu)
1693 {
1694         int nmaps = 0;
1695         struct kvm_mmu_page *page;
1696         int i;
1697
1698         list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1699                 u64 *pt = page->spt;
1700
1701                 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1702                         continue;
1703
1704                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1705                         u64 ent = pt[i];
1706
1707                         if (!(ent & PT_PRESENT_MASK))
1708                                 continue;
1709                         if (!(ent & PT_WRITABLE_MASK))
1710                                 continue;
1711                         ++nmaps;
1712                 }
1713         }
1714         return nmaps;
1715 }
1716
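/*
 * The rmap is expected to track exactly the writable shadow ptes, so the two
 * counts must agree.
 */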
1717 static void audit_rmap(struct kvm_vcpu *vcpu)
1718 {
1719         int n_rmap = count_rmaps(vcpu);
1720         int n_actual = count_writable_mappings(vcpu);
1721
1722         if (n_rmap != n_actual)
1723                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1724                        __FUNCTION__, audit_msg, n_rmap, n_actual);
1725 }
1726
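/*
 * Every shadow page that shadows a real guest page table must have its guest
 * frame write protected; a non-empty rmap entry for that gfn means a writable
 * mapping still exists.
 */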
1727 static void audit_write_protection(struct kvm_vcpu *vcpu)
1728 {
1729         struct kvm_mmu_page *page;
1730         struct kvm_memory_slot *slot;
1731         unsigned long *rmapp;
1732         gfn_t gfn;
1733
1734         list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1735                 if (page->role.metaphysical)
1736                         continue;
1737
1738                 slot = gfn_to_memslot(vcpu->kvm, page->gfn);
1739                 gfn = unalias_gfn(vcpu->kvm, page->gfn);
1740                 rmapp = &slot->rmap[gfn - slot->base_gfn];
1741                 if (*rmapp)
1742                         printk(KERN_ERR "%s: (%s) shadow page has writable"
1743                                " mappings: gfn %lx role %x\n",
1744                                __FUNCTION__, audit_msg, page->gfn,
1745                                page->role.word);
1746         }
1747 }
1748
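/* Run the rmap, write protection and mapping audits with debug output off. */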
1749 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1750 {
1751         int olddbg = dbg;
1752
1753         dbg = 0;
1754         audit_msg = msg;
1755         audit_rmap(vcpu);
1756         audit_write_protection(vcpu);
1757         audit_mappings(vcpu);
1758         dbg = olddbg;
1759 }
1760
1761 #endif