/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when it is finally loaded into %cr3, and
 * doesn't need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
        pte_t *pte = lookup_address(address);
        unsigned offset = address & ~PAGE_MASK;   /* offset within the page */

        BUG_ON(pte == NULL);
        return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
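
/*
 * Force the pte for a lowmem page read-only or read-write via an
 * update_va_mapping hypercall.  Pagetable pages must be mapped
 * read-only in the guest before Xen will accept them as part of a
 * pinned pagetable.
 */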
void make_lowmem_page_readonly(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;

        pte = lookup_address(address);
        BUG_ON(pte == NULL);

        ptev = pte_wrprotect(*pte);
        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;

        pte = lookup_address(address);
        BUG_ON(pte == NULL);

        ptev = pte_mkwrite(*pte);
        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}
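
/*
 * Update a pmd entry.  The new value is handed to Xen as a batched
 * mmu_update via the multicall interface rather than written directly,
 * since the page holding the pmd is read-only once it is pinned.
 */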
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();
        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pmd_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
        xen_mc_issue(PARAVIRT_LAZY_MMU);
        preempt_enable();
}
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <mfn,flags> stored as-is, to permit clearing entries */
        xen_set_pte(pte, mfn_pte(mfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
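
/*
 * Set a pte in an arbitrary mm.  For a live mm the update can be done
 * with an update_va_mapping hypercall (batched when in lazy MMU mode);
 * otherwise fall back to writing the pte entry directly.
 */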
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
{
        if (mm == current->mm || mm == &init_mm) {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                        struct multicall_space mcs;
                        mcs = xen_mc_entry(0);

                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                        return;
                } else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
                        return;
        }

        xen_set_pte(ptep, pteval);
}
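
/*
 * The pte/pmd/pgd accessors below come in two flavours: a PAE version,
 * where a pte is 64 bits wide and is written high word first, and a
 * non-PAE version where an entry fits in a single 32-bit word.
 */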
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();
        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pud_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
        xen_mc_issue(PARAVIRT_LAZY_MMU);
        preempt_enable();
}
void xen_set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();              /* write high word before low (present bit) */
        ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();              /* make sure low gets written first */
        ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
        xen_set_pmd(pmdp, __pmd(0));
}
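
/*
 * The *_val and make_* helpers below implement the pfn <-> mfn
 * conversion described at the top of the file: entries read out of a
 * pagetable have their mfn converted back to a pfn, and newly created
 * entries have their pfn converted to an mfn.
 */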
unsigned long long xen_pte_val(pte_t pte)
{
        unsigned long long ret = 0;

        if (pte.pte_low) {
                ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
                ret = machine_to_phys(XMADDR(ret)).paddr | 1;
        }
        return ret;
}

unsigned long long xen_pmd_val(pmd_t pmd)
{
        unsigned long long ret = pmd.pmd;
        if (ret)
                ret = machine_to_phys(XMADDR(ret)).paddr | 1;
        return ret;
}

unsigned long long xen_pgd_val(pgd_t pgd)
{
        unsigned long long ret = pgd.pgd;
        if (ret)
                ret = machine_to_phys(XMADDR(ret)).paddr | 1;
        return ret;
}

pte_t xen_make_pte(unsigned long long pte)
{
        if (pte & _PAGE_PRESENT)
                pte = phys_to_machine(XPADDR(pte)).maddr;
        return (pte_t){ pte, pte >> 32 };
}

pmd_t xen_make_pmd(unsigned long long pmd)
{
        if (pmd & _PAGE_PRESENT)
                pmd = phys_to_machine(XPADDR(pmd)).maddr;
        return (pmd_t){ pmd };
}

pgd_t xen_make_pgd(unsigned long long pgd)
{
        if (pgd & _PAGE_PRESENT)
                pgd = phys_to_machine(XPADDR(pgd)).maddr;
        return (pgd_t){ pgd };
}
#else  /* !CONFIG_X86_PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

unsigned long xen_pte_val(pte_t pte)
{
        unsigned long ret = pte.pte_low;

        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr;
        return ret;
}

unsigned long xen_pgd_val(pgd_t pgd)
{
        unsigned long ret = pgd.pgd;
        if (ret)
                ret = machine_to_phys(XMADDR(ret)).paddr | 1;
        return ret;
}

pte_t xen_make_pte(unsigned long pte)
{
        if (pte & _PAGE_PRESENT)
                pte = phys_to_machine(XPADDR(pte)).maddr;
        return (pte_t){ pte };
}

pgd_t xen_make_pgd(unsigned long pgd)
{
        if (pgd & _PAGE_PRESENT)
                pgd = phys_to_machine(XPADDR(pgd)).maddr;
        return (pgd_t){ pgd };
}
#endif  /* CONFIG_X86_PAE */
/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
 * The important bit is that we don't pin beyond there, because then
 * we start getting into Xen's ptes.
 */
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
                    unsigned long limit)
{
        pgd_t *pgd = pgd_base;
        int flush = 0;
        unsigned long addr = 0;
        unsigned long pgd_next;

        BUG_ON(limit > FIXADDR_TOP);

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
                pud_t *pud;
                unsigned long pud_limit, pud_next;

                pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

                if (!pgd_val(*pgd))
                        continue;

                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        flush |= (*func)(virt_to_page(pud), 0);

                for (; addr != pud_limit; pud++, addr = pud_next) {
                        pmd_t *pmd;
                        unsigned long pmd_limit;

                        pud_next = pud_addr_end(addr, pud_limit);
                        if (pud_next < limit)
                                pmd_limit = pud_next;
                        else
                                pmd_limit = limit;

                        if (pud_none(*pud))
                                continue;

                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                flush |= (*func)(virt_to_page(pmd), 0);

                        for (; addr != pmd_limit; pmd++) {
                                addr += (PAGE_SIZE * PTRS_PER_PTE);
                                if ((pmd_limit-1) < (addr-1)) {
                                        addr = pmd_limit;
                                        break;
                                }

                                if (pmd_none(*pmd))
                                        continue;

                                flush |= (*func)(pmd_page(*pmd), 0);
                        }
                }
        }

        /* finally, visit the pgd page itself and ask for a tlb flush */
        flush |= (*func)(virt_to_page(pgd_base), UVMF_TLB_FLUSH);

        return flush;
}
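
/*
 * pgd_walk callback used when pinning: mark the page as pinned and, for
 * lowmem pages, queue a multicall to remap it read-only.  Returns
 * non-zero if an unpinned highmem page was found, so that the caller
 * knows it must flush kmaps.
 */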
static int pin_page(struct page *page, unsigned flags)
{
        unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
        int flush;

        if (pgfl)
                flush = 0;              /* already pinned */
        else if (PageHighMem(page))
                /* kmaps need flushing if we found an unpinned
                   highpage */
                flush = 1;
        else {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);

                flush = 0;
                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL_RO),
                                        flags);
        }

        return flush;
}
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
        struct multicall_space mcs;
        struct mmuext_op *op;

        xen_mc_batch();

        if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
                /* re-enable interrupts for kmap_flush_unused */
                xen_mc_issue(0);
                kmap_flush_unused();
                xen_mc_batch();
        }

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
#ifdef CONFIG_X86_PAE
        op->cmd = MMUEXT_PIN_L3_TABLE;
#else
        op->cmd = MMUEXT_PIN_L2_TABLE;
#endif
        op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(0);
}
/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, unsigned flags)
{
        SetPagePinned(page);
        return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
        pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}
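
/*
 * pgd_walk callback used when unpinning: clear the pinned bit and, for
 * lowmem pages, queue a multicall to remap the page read-write again.
 */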
static int unpin_page(struct page *page, unsigned flags)
{
        unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

        if (pgfl && !PageHighMem(page)) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL),
                                        flags);
        }

        return 0;               /* never need to flush on unpin */
}
/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        xen_mc_batch();

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_UNPIN_TABLE;
        op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        pgd_walk(pgd, unpin_page, TASK_SIZE);

        xen_mc_issue(0);
}
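
/*
 * A freshly created (or duplicated) mm gets its pagetable pinned before
 * it is used.  The pinning is done under the mm's page_table_lock so
 * the pagetable cannot change underneath us.
 */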
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        spin_lock(&next->page_table_lock);
        xen_pgd_pin(next->pgd);
        spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);
        xen_pgd_pin(mm->pgd);
        spin_unlock(&mm->page_table_lock);
}
#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
        struct mm_struct *mm = info;

        if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
                leave_mm(smp_processor_id());
}
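
/*
 * Make sure no CPU is left with this mm as its active_mm: the current
 * CPU switches away from the pagetable directly, and every other CPU
 * in the mm's cpu_vm_mask is asked to do the same via drop_other_mm_ref.
 */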
static void drop_mm_ref(struct mm_struct *mm)
{
        if (current->active_mm == mm) {
                if (current->mm == mm)
                        load_cr3(swapper_pg_dir);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(mm->cpu_vm_mask))
                xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
                                           mm, 1);
}
#else  /* !CONFIG_SMP */
static void drop_mm_ref(struct mm_struct *mm)
{
        if (current->active_mm == mm)
                load_cr3(swapper_pg_dir);
}
#endif
/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch over to init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
        get_cpu();              /* make sure we don't move around */
        drop_mm_ref(mm);
        put_cpu();

        spin_lock(&mm->page_table_lock);

        /* pgd may not be pinned in the error exit path of execve */
        if (PagePinned(virt_to_page(mm->pgd)))
                xen_pgd_unpin(mm->pgd);

        spin_unlock(&mm->page_table_lock);
}