/* World's simplest hypervisor, to test paravirt_ops and show
 * unbelievers that virtualization is the future. Plus, it's fun! */
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/stddef.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/highmem.h>
#include <asm/asm-offsets.h>
#include <asm/i387.h>
#include "lg.h"

/* Found in switcher.S */
extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
extern unsigned long default_idt_entries[];

/* Every guest maps the core switcher code. */
#define SHARED_SWITCHER_PAGES \
	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
/* Pages for switcher itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)

/* We map at -4M for ease of mapping into the guest (one PTE page). */
#define SWITCHER_ADDR 0xFFC00000
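
/*
 * A quick check of that arithmetic (illustrative only, nothing below uses
 * it): 0xFFC00000 is 4GB - 4MB, the top 4MB of the 32-bit address space.
 * One i386 PTE page holds 1024 entries mapping 4096 bytes each, i.e.
 * exactly 1024 * 4096 = 4MB, so a single page of PTEs is enough to map
 * the whole Switcher region into a Guest.
 */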

static struct vm_struct *switcher_vma;
static struct page **switcher_page;

static int cpu_had_pge;

/* Far-call target used to enter the copied switcher code. */
static struct {
	unsigned long offset;
	unsigned short segment;
} lguest_entry;

/* This One Big lock protects all inter-guest data structures. */
DEFINE_MUTEX(lguest_lock);
static DEFINE_PER_CPU(struct lguest *, last_guest);

/* FIXME: Make dynamic. */
#define MAX_LGUEST_GUESTS 16
struct lguest lguests[MAX_LGUEST_GUESTS];

/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}

/* This cpu's struct lguest_pages. */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
	return &(((struct lguest_pages *)
		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
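
/*
 * Layout sketch (matching the "two pages per cpu" above): the shared
 * Switcher text fills SHARED_SWITCHER_PAGES pages starting at
 * SWITCHER_ADDR, and each cpu's struct lguest_pages follows it, so cpu N
 * sits at SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE
 * + N*sizeof(struct lguest_pages).
 */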

static __init int map_switcher(void)
{
	int i, err;
	struct page **pagep;

	switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
				GFP_KERNEL);
	if (!switcher_page) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		unsigned long addr = get_zeroed_page(GFP_KERNEL);
		if (!addr) {
			err = -ENOMEM;
			goto free_some_pages;
		}
		switcher_page[i] = virt_to_page(addr);
	}

	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
				     VM_ALLOC, SWITCHER_ADDR, VMALLOC_END);
	if (!switcher_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	pagep = switcher_page;
	err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
	if (err) {
		printk("lguest: map_vm_area failed: %i\n", err);
		goto free_vma;
	}
	memcpy(switcher_vma->addr, start_switcher_text,
	       end_switcher_text - start_switcher_text);

	/* Fix up IDT entries to point into copied text. */
	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	for_each_possible_cpu(i) {
		struct lguest_pages *pages = lguest_pages(i);
		struct lguest_ro_state *state = &pages->state;

		/* These fields are static: rest done in copy_in_guest_info */
		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
		store_idt(&state->host_idt_desc);
		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;
		state->guest_tss.esp0 = (long)(&pages->regs + 1);
		state->guest_tss.ss0 = LGUEST_DS;
		/* No I/O for you! */
		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
		setup_default_gdt_entries(state);
		setup_default_idt_entries(state, default_idt_entries);

		/* Setup LGUEST segments on all cpus */
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	/* Initialize entry point into switcher. */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_vma->addr);
	return 0;

free_vma:
	vunmap(switcher_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(switcher_page[i], 0);
	kfree(switcher_page);
out:
	return err;
}

static void unmap_switcher(void)
{
	unsigned int i;

	vunmap(switcher_vma->addr);
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
		__free_pages(switcher_page[i], 0);
}

/* IN/OUT insns: enough to get us past boot-time probing. */
static int emulate_insn(struct lguest *lg)
{
	u8 insn;
	unsigned int insnlen = 0, in = 0, shift = 0;
	unsigned long physaddr = guest_pa(lg, lg->regs->eip);

	/* This only works for addresses in linear mapping... */
	if (lg->regs->eip < lg->page_offset)
		return 0;
	lgread(lg, &insn, physaddr, 1);

	/* Operand size prefix means it's actually for ax. */
	if (insn == 0x66) {
		shift = 16;
		insnlen = 1;
		lgread(lg, &insn, physaddr + insnlen, 1);
	}

	switch (insn & 0xFE) {
	case 0xE4: /* in <next byte>,%al */
		insnlen += 2;
		in = 1;
		break;
	case 0xEC: /* in (%dx),%al */
		insnlen += 1;
		in = 1;
		break;
	case 0xE6: /* out %al,<next byte> */
		insnlen += 2;
		break;
	case 0xEE: /* out %al,(%dx) */
		insnlen += 1;
		break;
	default:
		return 0;
	}

	if (in) {
		/* Lower bit tells us whether it's a 16 or 32 bit access */
		if (insn & 0x1)
			lg->regs->eax = 0xFFFFFFFF;
		else
			lg->regs->eax |= (0xFFFF << shift);
	}
	lg->regs->eip += insnlen;
	return 1;
}
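
/*
 * For example (illustrative only): if the Guest executes "inb $0x71,%al"
 * (bytes 0xE4 0x71), we advance eip past both bytes and the low byte of
 * %eax reads back as 0xFF, which boot-time probing treats as "nothing
 * there".  Anything we don't recognize is left to become a real General
 * Protection Fault for the Guest.
 */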

int lguest_address_ok(const struct lguest *lg,
		      unsigned long addr, unsigned long len)
{
	return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
}
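
/*
 * Worked example (illustrative, assuming a pfn_limit of 0x4000, i.e. 64MB
 * of Guest memory): a 4-byte access at 0x3FFFFF8 is allowed because
 * 0x3FFFFFC / PAGE_SIZE == 0x3FFF < 0x4000, while one at 0x3FFFFFC is
 * refused because 0x4000000 / PAGE_SIZE == 0x4000.  The second test
 * catches addr+len wrapping around zero.
 */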

/* Just like get_user, but don't let guest access lguest binary. */
u32 lgread_u32(struct lguest *lg, unsigned long addr)
{
	u32 val = 0;

	/* Don't let them access lguest binary */
	if (!lguest_address_ok(lg, addr, sizeof(val))
	    || get_user(val, (u32 __user *)addr) != 0)
		kill_guest(lg, "bad read address %#lx", addr);
	return val;
}

void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
{
	if (!lguest_address_ok(lg, addr, sizeof(val))
	    || put_user(val, (u32 __user *)addr) != 0)
		kill_guest(lg, "bad write address %#lx", addr);
}

void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
{
	if (!lguest_address_ok(lg, addr, bytes)
	    || copy_from_user(b, (void __user *)addr, bytes) != 0) {
		/* copy_from_user should do this, but as we rely on it... */
		memset(b, 0, bytes);
		kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
	}
}

void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
	     unsigned bytes)
{
	if (!lguest_address_ok(lg, addr, bytes)
	    || copy_to_user((void __user *)addr, b, bytes) != 0)
		kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
}
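
/*
 * Hypothetical usage sketch (the real callers live elsewhere in lguest):
 * the Host uses these helpers whenever it needs to peek or poke Guest
 * memory through the Launcher's mapping, e.g.
 *
 *	u32 gdt_lo = lgread_u32(lg, desc_addr);
 *
 * where "desc_addr" is an address the Guest handed us in a hypercall; a
 * bad address kills the Guest instead of corrupting the Host.
 */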

/* Set CR0.TS so the Guest's first FPU access will trap (trap 7 below). */
static void set_ts(void)
{
	u32 cr0;

	cr0 = read_cr0();
	if (!(cr0 & 8))
		write_cr0(cr0|8);
}

static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
{
	if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
		__get_cpu_var(last_guest) = lg;
		lg->last_pages = pages;
		lg->changed = CHANGED_ALL;
	}

	/* These are pretty cheap, so we do them unconditionally. */
	pages->state.host_cr3 = __pa(current->mm->pgd);
	map_switcher_in_guest(lg, pages);
	pages->state.guest_tss.esp1 = lg->esp1;
	pages->state.guest_tss.ss1 = lg->ss1;

	/* Copy direct trap entries. */
	if (lg->changed & CHANGED_IDT)
		copy_traps(lg, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries but the TSS. */
	if (lg->changed & CHANGED_GDT)
		copy_gdt(lg, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (lg->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(lg, pages->state.guest_gdt);

	/* Everything copied: clear the "changed" flags. */
	lg->changed = 0;
}

static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
{
	unsigned int clobber;

	copy_in_guest_info(lg, pages);

	/* Put eflags on stack, lcall does rest: suitable for iret return. */
	asm volatile("pushf; lcall *lguest_entry"
		     : "=a"(clobber), "=b"(clobber)
		     : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
		     : "memory", "%edx", "%ecx", "%edi", "%esi");
}
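
/*
 * Reading the asm above: %eax carries this cpu's struct lguest_pages and
 * %ebx the physical address of the Guest's current top-level pagetable;
 * the far call through lguest_entry lands in the relocated switch_to_guest
 * code.  "pushf" leaves eflags on the stack so the Switcher can come back
 * to us with a single iret.
 */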

int run_guest(struct lguest *lg, unsigned long __user *user)
{
	while (!lg->dead) {
		unsigned int cr2 = 0; /* Damn gcc */

		/* Hypercalls first: we might have been out to userspace */
		do_hypercalls(lg);
		if (lg->dma_is_pending) {
			if (put_user(lg->pending_dma, user) ||
			    put_user(lg->pending_key, user+1))
				return -EFAULT;
			return sizeof(unsigned long)*2;
		}

		if (signal_pending(current))
			return -ERESTARTSYS;

		/* If Waker set break_out, return to Launcher. */
		if (lg->break_out)
			return -EAGAIN;

		maybe_do_interrupt(lg);

		try_to_freeze();

		if (lg->dead)
			break;

		if (lg->halted) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			continue;
		}

		local_irq_disable();

		/* Even if *we* don't want FPU trap, guest might... */
		if (lg->ts)
			set_ts();

		/* Don't let Guest do SYSENTER: we can't handle it. */
		if (boot_cpu_has(X86_FEATURE_SEP))
			wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

		run_guest_once(lg, lguest_pages(raw_smp_processor_id()));

		/* Save cr2 now if we page-faulted. */
		if (lg->regs->trapnum == 14)
			cr2 = read_cr2();
		else if (lg->regs->trapnum == 7)
			math_state_restore();

		if (boot_cpu_has(X86_FEATURE_SEP))
			wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
		local_irq_enable();

		switch (lg->regs->trapnum) {
		case 13: /* We've intercepted a GPF. */
			if (lg->regs->errcode == 0) {
				if (emulate_insn(lg))
					continue;
			}
			break;
		case 14: /* We've intercepted a page fault. */
			if (demand_page(lg, cr2, lg->regs->errcode))
				continue;

			/* If lguest_data is NULL, this won't hurt. */
			if (put_user(cr2, &lg->lguest_data->cr2))
				kill_guest(lg, "Writing cr2");
			break;
		case 7: /* We've intercepted a Device Not Available fault. */
			/* If they don't want to know, just absorb it. */
			if (!lg->ts)
				continue;
			break;
		case 32 ... 255: /* Real interrupt, fall thru */
			cond_resched();
		case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
			continue;
		}

		if (deliver_trap(lg, lg->regs->trapnum))
			continue;

		kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
			   lg->regs->trapnum, lg->regs->eip,
			   lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
	}
	return -ENOENT;
}
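
/*
 * A hedged sketch of the Launcher side of that return value (the real
 * Launcher is userspace code, not part of this file): a positive return
 * of sizeof(unsigned long)*2 means the pending DMA address and key were
 * copied out through "user", roughly
 *
 *	unsigned long dma_and_key[2];
 *	if (read(lguest_fd, dma_and_key, sizeof(dma_and_key)) > 0)
 *		service_dma(dma_and_key[0], dma_and_key[1]);
 *
 * where lguest_fd and service_dma() are hypothetical names.  The negative
 * returns (-EFAULT, -ERESTARTSYS, -EAGAIN, -ENOENT) reach the Launcher as
 * ordinary errno values.
 */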

/* Find an unused slot in the lguests array: -1 means they're all taken. */
int find_free_guest(void)
{
	unsigned int i;
	for (i = 0; i < MAX_LGUEST_GUESTS; i++)
		if (!lguests[i].tsk)
			return i;
	return -1;
}

static void adjust_pge(void *on)
{
	if (on)
		write_cr4(read_cr4() | X86_CR4_PGE);
	else
		write_cr4(read_cr4() & ~X86_CR4_PGE);
}
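
/*
 * Why this matters: with CR4.PGE clear, pagetable entries marked
 * _PAGE_GLOBAL no longer survive a cr3 reload in the TLB, so the Switcher's
 * pagetable switches between Host and Guest really do flush everything.
 * init() below turns PGE off on every cpu for exactly that reason, and
 * fini() turns it back on if the cpu had it.
 */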

static int __init init(void)
{
	int err;

	if (paravirt_enabled()) {
		printk("lguest is afraid of %s\n", paravirt_ops.name);
		return -EPERM;
	}

	err = map_switcher();
	if (err)
		return err;

	err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
	if (err) {
		unmap_switcher();
		return err;
	}
	lguest_io_init();

	err = lguest_device_init();
	if (err) {
		free_pagetables();
		unmap_switcher();
		return err;
	}
	lock_cpu_hotplug();
	if (cpu_has_pge) { /* We have a broader idea of "global". */
		cpu_had_pge = 1;
		on_each_cpu(adjust_pge, (void *)0, 0, 1);
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
	}
	unlock_cpu_hotplug();
	return 0;
}

static void __exit fini(void)
{
	lguest_device_remove();
	free_pagetables();
	unmap_switcher();

	lock_cpu_hotplug();
	if (cpu_had_pge) {
		set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		on_each_cpu(adjust_pge, (void *)1, 0, 1);
	}
	unlock_cpu_hotplug();
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");