/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
19 #include "segment_descriptor.h"
22 #include <linux/kvm.h>
24 #include <linux/vmalloc.h>
25 #include <linux/module.h>
27 #include <asm/uaccess.h>
29 #define MAX_IO_MSRS 256
31 unsigned long segment_base(u16 selector)
33 struct descriptor_table gdt;
34 struct segment_descriptor *d;
35 unsigned long table_base;
41 asm("sgdt %0" : "=m"(gdt));
42 table_base = gdt.base;
44 if (selector & 4) { /* from ldt */
47 asm("sldt %0" : "=g"(ldt_selector));
48 table_base = segment_base(ldt_selector);
50 d = (struct segment_descriptor *)(table_base + (selector & ~7));
51 v = d->base_low | ((unsigned long)d->base_mid << 16) |
52 ((unsigned long)d->base_high << 24);
54 if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
55 v |= ((unsigned long) \
56 ((struct segment_descriptor_64 *)d)->base_higher) << 32;
60 EXPORT_SYMBOL_GPL(segment_base);
62 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
64 if (irqchip_in_kernel(vcpu->kvm))
65 return vcpu->apic_base;
67 return vcpu->apic_base;
69 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
71 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
73 /* TODO: reserve bits check */
74 if (irqchip_in_kernel(vcpu->kvm))
75 kvm_lapic_set_base(vcpu, data);
77 vcpu->apic_base = data;
79 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
82 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
83 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
85 * This list is modified at module load time to reflect the
86 * capabilities of the host cpu.
88 static u32 msrs_to_save[] = {
89 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
92 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
94 MSR_IA32_TIME_STAMP_COUNTER,
97 static unsigned num_msrs_to_save;
99 static u32 emulated_msrs[] = {
100 MSR_IA32_MISC_ENABLE,
104 * Adapt set_msr() to msr_io()'s calling convention
106 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
108 return kvm_set_msr(vcpu, index, *data);
112 * Read or write a bunch of msrs. All parameters are kernel addresses.
114 * @return number of msrs set successfully.
116 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
117 struct kvm_msr_entry *entries,
118 int (*do_msr)(struct kvm_vcpu *vcpu,
119 unsigned index, u64 *data))
125 for (i = 0; i < msrs->nmsrs; ++i)
126 if (do_msr(vcpu, entries[i].index, &entries[i].data))
135 * Read or write a bunch of msrs. Parameters are user addresses.
137 * @return number of msrs set successfully.
139 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
140 int (*do_msr)(struct kvm_vcpu *vcpu,
141 unsigned index, u64 *data),
144 struct kvm_msrs msrs;
145 struct kvm_msr_entry *entries;
150 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
154 if (msrs.nmsrs >= MAX_IO_MSRS)
158 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
159 entries = vmalloc(size);
164 if (copy_from_user(entries, user_msrs->entries, size))
167 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
172 if (writeback && copy_to_user(user_msrs->entries, entries, size))
183 long kvm_arch_dev_ioctl(struct file *filp,
184 unsigned int ioctl, unsigned long arg)
186 void __user *argp = (void __user *)arg;
190 case KVM_GET_MSR_INDEX_LIST: {
191 struct kvm_msr_list __user *user_msr_list = argp;
192 struct kvm_msr_list msr_list;
196 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
199 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
200 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
203 if (n < num_msrs_to_save)
206 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
207 num_msrs_to_save * sizeof(u32)))
209 if (copy_to_user(user_msr_list->indices
210 + num_msrs_to_save * sizeof(u32),
212 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
224 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
226 kvm_x86_ops->vcpu_load(vcpu, cpu);
229 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
231 kvm_x86_ops->vcpu_put(vcpu);
234 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
238 struct kvm_cpuid_entry *e, *entry;
240 rdmsrl(MSR_EFER, efer);
242 for (i = 0; i < vcpu->cpuid_nent; ++i) {
243 e = &vcpu->cpuid_entries[i];
244 if (e->function == 0x80000001) {
249 if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
250 entry->edx &= ~(1 << 20);
251 printk(KERN_INFO "kvm: guest NX capability removed\n");
255 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
256 struct kvm_cpuid *cpuid,
257 struct kvm_cpuid_entry __user *entries)
262 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
265 if (copy_from_user(&vcpu->cpuid_entries, entries,
266 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
268 vcpu->cpuid_nent = cpuid->nent;
269 cpuid_fix_nx_cap(vcpu);
276 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
277 struct kvm_lapic_state *s)
280 memcpy(s->regs, vcpu->apic->regs, sizeof *s);
286 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
287 struct kvm_lapic_state *s)
290 memcpy(vcpu->apic->regs, s->regs, sizeof *s);
291 kvm_apic_post_state_restore(vcpu);
297 long kvm_arch_vcpu_ioctl(struct file *filp,
298 unsigned int ioctl, unsigned long arg)
300 struct kvm_vcpu *vcpu = filp->private_data;
301 void __user *argp = (void __user *)arg;
305 case KVM_GET_LAPIC: {
306 struct kvm_lapic_state lapic;
308 memset(&lapic, 0, sizeof lapic);
309 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
313 if (copy_to_user(argp, &lapic, sizeof lapic))
318 case KVM_SET_LAPIC: {
319 struct kvm_lapic_state lapic;
322 if (copy_from_user(&lapic, argp, sizeof lapic))
324 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
330 case KVM_SET_CPUID: {
331 struct kvm_cpuid __user *cpuid_arg = argp;
332 struct kvm_cpuid cpuid;
335 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
337 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
343 r = msr_io(vcpu, argp, kvm_get_msr, 1);
346 r = msr_io(vcpu, argp, do_set_msr, 0);
355 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
359 if (addr > (unsigned int)(-3 * PAGE_SIZE))
361 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
365 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
366 u32 kvm_nr_mmu_pages)
368 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
371 mutex_lock(&kvm->lock);
373 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
374 kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
376 mutex_unlock(&kvm->lock);
380 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
382 return kvm->n_alloc_mmu_pages;
386 * Set a new alias region. Aliases map a portion of physical memory into
387 * another portion. This is useful for memory windows, for example the PC
390 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
391 struct kvm_memory_alias *alias)
394 struct kvm_mem_alias *p;
397 /* General sanity checks */
398 if (alias->memory_size & (PAGE_SIZE - 1))
400 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
402 if (alias->slot >= KVM_ALIAS_SLOTS)
404 if (alias->guest_phys_addr + alias->memory_size
405 < alias->guest_phys_addr)
407 if (alias->target_phys_addr + alias->memory_size
408 < alias->target_phys_addr)
411 mutex_lock(&kvm->lock);
413 p = &kvm->aliases[alias->slot];
414 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
415 p->npages = alias->memory_size >> PAGE_SHIFT;
416 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
418 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
419 if (kvm->aliases[n - 1].npages)
423 kvm_mmu_zap_all(kvm);
425 mutex_unlock(&kvm->lock);
433 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
438 switch (chip->chip_id) {
439 case KVM_IRQCHIP_PIC_MASTER:
440 memcpy(&chip->chip.pic,
441 &pic_irqchip(kvm)->pics[0],
442 sizeof(struct kvm_pic_state));
444 case KVM_IRQCHIP_PIC_SLAVE:
445 memcpy(&chip->chip.pic,
446 &pic_irqchip(kvm)->pics[1],
447 sizeof(struct kvm_pic_state));
449 case KVM_IRQCHIP_IOAPIC:
450 memcpy(&chip->chip.ioapic,
452 sizeof(struct kvm_ioapic_state));
461 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
466 switch (chip->chip_id) {
467 case KVM_IRQCHIP_PIC_MASTER:
468 memcpy(&pic_irqchip(kvm)->pics[0],
470 sizeof(struct kvm_pic_state));
472 case KVM_IRQCHIP_PIC_SLAVE:
473 memcpy(&pic_irqchip(kvm)->pics[1],
475 sizeof(struct kvm_pic_state));
477 case KVM_IRQCHIP_IOAPIC:
478 memcpy(ioapic_irqchip(kvm),
480 sizeof(struct kvm_ioapic_state));
486 kvm_pic_update_irq(pic_irqchip(kvm));
490 long kvm_arch_vm_ioctl(struct file *filp,
491 unsigned int ioctl, unsigned long arg)
493 struct kvm *kvm = filp->private_data;
494 void __user *argp = (void __user *)arg;
498 case KVM_SET_TSS_ADDR:
499 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
503 case KVM_SET_MEMORY_REGION: {
504 struct kvm_memory_region kvm_mem;
505 struct kvm_userspace_memory_region kvm_userspace_mem;
508 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
510 kvm_userspace_mem.slot = kvm_mem.slot;
511 kvm_userspace_mem.flags = kvm_mem.flags;
512 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
513 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
514 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
519 case KVM_SET_NR_MMU_PAGES:
520 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
524 case KVM_GET_NR_MMU_PAGES:
525 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
527 case KVM_SET_MEMORY_ALIAS: {
528 struct kvm_memory_alias alias;
531 if (copy_from_user(&alias, argp, sizeof alias))
533 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
538 case KVM_CREATE_IRQCHIP:
540 kvm->vpic = kvm_create_pic(kvm);
542 r = kvm_ioapic_init(kvm);
552 struct kvm_irq_level irq_event;
555 if (copy_from_user(&irq_event, argp, sizeof irq_event))
557 if (irqchip_in_kernel(kvm)) {
558 mutex_lock(&kvm->lock);
559 if (irq_event.irq < 16)
560 kvm_pic_set_irq(pic_irqchip(kvm),
563 kvm_ioapic_set_irq(kvm->vioapic,
566 mutex_unlock(&kvm->lock);
571 case KVM_GET_IRQCHIP: {
572 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
573 struct kvm_irqchip chip;
576 if (copy_from_user(&chip, argp, sizeof chip))
579 if (!irqchip_in_kernel(kvm))
581 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
585 if (copy_to_user(argp, &chip, sizeof chip))
590 case KVM_SET_IRQCHIP: {
591 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
592 struct kvm_irqchip chip;
595 if (copy_from_user(&chip, argp, sizeof chip))
598 if (!irqchip_in_kernel(kvm))
600 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
613 static __init void kvm_init_msr_list(void)
618 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
619 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
622 msrs_to_save[j] = msrs_to_save[i];
625 num_msrs_to_save = j;
628 __init void kvm_arch_init(void)