/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/magic.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/mount.h>
#include <linux/sched.h>

#include "x86_emulate.h"
#include "segment_descriptor.h"
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

struct kvm_arch_ops *kvm_arch_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};
static struct dentry *debugfs_dir;

struct vfsmount *kvmfs_mnt;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};
#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static struct inode *kvmfs_inode(struct file_operations *fops)
{
	int error = -ENOMEM;
	struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);

	if (!inode)
		goto eexit_1;

	inode->i_fop = fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;

eexit_1:
	return ERR_PTR(error);
}
static struct file *kvmfs_file(struct inode *inode, void *private_data)
{
	struct file *file = get_empty_filp();

	if (!file)
		return ERR_PTR(-ENFILE);

	file->f_path.mnt = mntget(kvmfs_mnt);
	file->f_path.dentry = d_alloc_anon(inode);
	if (!file->f_path.dentry)
		return ERR_PTR(-ENOMEM);
	file->f_mapping = inode->i_mapping;

	file->f_pos = 0;
	file->f_flags = O_RDWR;
	file->f_op = inode->i_fop;
	file->f_mode = FMODE_READ | FMODE_WRITE;
	file->f_version = 0;
	file->private_data = private_data;
	return file;
}
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {	/* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		   void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now, offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);
		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void *)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		    void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now, offset;
		hva_t guest_buf;
		gfn_t gfn;

		paddr = gva_to_hpa(vcpu, addr);
		if (is_error_hpa(paddr))
			break;

		gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
		mark_page_dirty(vcpu->kvm, gfn);
		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy((void *)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
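/*
 * Usage sketch (illustrative only, not part of the driver): copying a
 * hypothetical 16-byte guest descriptor that may straddle a page boundary.
 * Both helpers return the number of bytes actually copied, so callers must
 * treat a short count as a partially unmapped guest range.
 */
#if 0
	u8 desc[16];

	if (kvm_read_guest(vcpu, gva, sizeof(desc), desc) != sizeof(desc))
		return -EFAULT;		/* part of the range was unmapped */
	/* ... modify desc ... */
	if (kvm_write_guest(vcpu, gva, sizeof(desc), desc) != sizeof(desc))
		return -EFAULT;
#endif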
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->mutex);
	kvm_arch_ops->vcpu_load(vcpu);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
 * if the slot is not populated.
 */
static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
{
	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];

	mutex_lock(&vcpu->mutex);
	if (!vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return NULL;
	}
	kvm_arch_ops->vcpu_load(vcpu);
	return vcpu;
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_put(vcpu);
	mutex_unlock(&vcpu->mutex);
}
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

		mutex_init(&vcpu->mutex);
		vcpu->cpu = -1;
		vcpu->kvm = kvm;
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
	return kvm;
}
static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	return 0;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}
static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < 2; ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	vcpu_load(vcpu);
	kvm_mmu_destroy(vcpu);
	vcpu_put(vcpu);
	kvm_arch_ops->vcpu_free(vcpu);
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
	free_page((unsigned long)vcpu->pio_data);
	vcpu->pio_data = NULL;
	free_pio_guest_pages(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	return 0;
}
static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i, ret;
	u64 pdpte;
	u64 *pdpt;
	struct page *page;

	spin_lock(&vcpu->kvm->lock);
	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
	/* FIXME: !page - emulate? 0xff? */
	pdpt = kmap_atomic(page, KM_USER0);

	ret = 1;
	for (i = 0; i < 4; ++i) {
		pdpte = pdpt[offset + i];
		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}

	for (i = 0; i < 4; ++i)
		vcpu->pdptrs[i] = pdpt[offset + i];

out:
	kunmap_atomic(pdpt, KM_USER0);
	spin_unlock(&vcpu->kvm->lock);

	return ret;
}
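/*
 * Worked example of the offset arithmetic above (informative): in PAE mode
 * CR3 points at a 32-byte-aligned group of four 8-byte PDPTEs.  For
 * cr3 = 0x12345fe0, offset = ((0xfe0 >> 5) << 2) = 508, so the entries
 * examined are pdpt[508]..pdpt[511] within the page at gfn 0x12345.
 */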
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & CR4_PAE_MASK)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & CR4_VMXE_MASK) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_arch_ops->set_cr4(vcpu, cr4);
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (cr3 & CR3_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
		if (is_paging(vcpu) && is_pae(vcpu) &&
		    !load_pdptrs(vcpu, cr3)) {
			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else
		vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);
void fx_init(struct kvm_vcpu *vcpu)
{
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control;	/* fcw */
		u16 status;	/* fsw */
		u16 tag;	/* ftw */
		u16 opcode;	/* fop */
		u64 ip;		/* fpu ip */
		u64 operand;	/* fpu dp */
		u32 mxcsr;
		u32 mxcsr_mask;
	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);
static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_slot_remove_write_access(vcpu, slot);
	spin_unlock(&vcpu->kvm->lock);
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));
		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	spin_unlock(&kvm->lock);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu;

		vcpu = vcpu_load_slot(kvm, i);
		if (!vcpu)
			continue;
		if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
			do_remove_write_access(vcpu, mem->slot);
		kvm_mmu_reset_context(vcpu);
		vcpu_put(vcpu);
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
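/*
 * A minimal userspace sketch of driving this ioctl (illustrative only;
 * it assumes a VM fd obtained via KVM_CREATE_VM and this API version's
 * kvm_memory_region layout):
 */
#if 0
	struct kvm_memory_region mem = {
		.slot            = 0,
		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = 0,
		.memory_size     = 16 * 1024 * 1024,	/* page aligned */
	};

	if (ioctl(vm_fd, KVM_SET_MEMORY_REGION, &mem) < 0)
		perror("KVM_SET_MEMORY_REGION");
#endif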
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	int cleared;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	kvm->busy = 1;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any) {
		cleared = 0;
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			struct kvm_vcpu *vcpu;

			vcpu = vcpu_load_slot(kvm, i);
			if (!vcpu)
				continue;
			if (!cleared) {
				do_remove_write_access(vcpu, log->slot);
				memset(memslot->dirty_bitmap, 0, n);
				cleared = 1;
			}
			kvm_arch_ops->tlb_flush(vcpu);
			vcpu_put(vcpu);
		}
	}

	r = 0;

out:
	spin_lock(&kvm->lock);
	kvm->busy = 0;
	spin_unlock(&kvm->lock);
	return r;
}
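/*
 * Bitmap sizing example (informative): a 1000-page slot rounds up to
 * ALIGN(1000, 64) = 1024 bits on a 64-bit host, i.e. n = 128 bytes are
 * copied out to userspace, one bit per guest page.
 */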
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	spin_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	spin_unlock(&kvm->lock);

	vcpu_load(&kvm->vcpus[0]);
	spin_lock(&kvm->lock);
	kvm_mmu_zap_all(&kvm->vcpus[0]);
	spin_unlock(&kvm->lock);
	vcpu_put(&kvm->vcpus[0]);

	return 0;

out:
	return r;
}
static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}
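/*
 * Example (informative): with an alias slot of base_gfn 0xa0, npages 0x10
 * and target_gfn 0x800, a guest access to gfn 0xa5 resolves to gfn 0x805;
 * gfns outside every alias window are returned unchanged.
 */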
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return NULL;
	return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memory_slot *memslot;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
		memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages) {

			if (!memslot->dirty_bitmap)
				return;

			rel_gfn = gfn - memslot->base_gfn;

			/* avoid RMW */
			if (!test_bit(rel_gfn, memslot->dirty_bitmap))
				set_bit(rel_gfn, memslot->dirty_bitmap);
			return;
		}
	}
}
static int emulator_read_std(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct page *page;
		void *page_virt;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		page = gfn_to_page(vcpu->kvm, pfn);
		if (!page)
			return X86EMUL_UNHANDLEABLE;
		page_virt = kmap_atomic(page, KM_USER0);

		memcpy(data, page_virt + offset, tocopy);

		kunmap_atomic(page_virt, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct x86_emulate_ctxt *ctxt)
{
	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
	       addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	else {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		vcpu->mmio_needed = 1;
		vcpu->mmio_phys_addr = gpa;
		vcpu->mmio_size = bytes;
		vcpu->mmio_is_write = 0;

		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	struct page *page;
	void *virt;
	unsigned offset = offset_in_page(gpa);

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return 0;
	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	virt = kmap_atomic(page, KM_USER0);
	kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
	memcpy(virt + offset, val, bytes);
	kunmap_atomic(virt, KM_USER0);
	return 1;
}
static int emulator_write_emulated(unsigned long addr,
				   const void *val,
				   unsigned int bytes,
				   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_emulate_ctxt *ctxt)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, ctxt);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}
int emulate_clts(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;

	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
	kvm_arch_ops->set_cr0(vcpu, cr0);
	return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_arch_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}
static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = ctxt->vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

	printk(KERN_ERR "emulation failed but !mmio_needed?"
	       " rip %lx %02x %02x %02x %02x\n",
	       rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
struct x86_emulate_ops emulate_ops = {
	.read_std         = emulator_read_std,
	.write_std        = emulator_write_std,
	.read_emulated    = emulator_read_emulated,
	.write_emulated   = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_arch_ops->cache_regs(vcpu);

	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	if ((r || vcpu->mmio_is_write) && run) {
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;

	kvm_arch_ops->cache_regs(vcpu);
	ret = -KVM_EINVAL;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		nr = vcpu->regs[VCPU_REGS_RAX];
		a0 = vcpu->regs[VCPU_REGS_RDI];
		a1 = vcpu->regs[VCPU_REGS_RSI];
		a2 = vcpu->regs[VCPU_REGS_RDX];
		a3 = vcpu->regs[VCPU_REGS_RCX];
		a4 = vcpu->regs[VCPU_REGS_R8];
		a5 = vcpu->regs[VCPU_REGS_R9];
	} else
#endif
	{
		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
	}
	switch (nr) {
	default:
		run->hypercall.args[0] = a0;
		run->hypercall.args[1] = a1;
		run->hypercall.args[2] = a2;
		run->hypercall.args[3] = a3;
		run->hypercall.args[4] = a4;
		run->hypercall.args[5] = a5;
		run->hypercall.ret = ret;
		run->hypercall.longmode = is_long_mode(vcpu);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_arch_ops->decache_regs(vcpu);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
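/*
 * Guest-side view of the register convention decoded above (an illustrative
 * sketch only; "hypercall_addr" is hypothetical and would point at the page
 * patched in by kvm_arch_ops->patch_hypercall()):
 */
#if 0
	unsigned long ret;

	asm volatile("call *%[hc]"
		     : "=a"(ret)			/* result in rax */
		     : [hc] "r"(hypercall_addr),
		       "0"(nr),				/* number in rax */
		       "D"(a0), "S"(a1), "d"(a2)	/* rdi, rsi, rdx */
		     : "memory");
#endif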
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_arch_ops->get_rflags(vcpu);
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
/*
 * Register the para guest with the host:
 */
static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
{
	struct kvm_vcpu_para_state *para_state;
	hpa_t para_state_hpa, hypercall_hpa;
	struct page *para_state_page;
	unsigned char *hypercall;
	gpa_t hypercall_gpa;

	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);

	/*
	 * Needs to be page aligned:
	 */
	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
		goto err_gp;

	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
	if (is_error_hpa(para_state_hpa))
		goto err_gp;

	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
	para_state = kmap_atomic(para_state_page, KM_USER0);

	printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
	printk(KERN_DEBUG ".... size: %d\n", para_state->size);

	para_state->host_version = KVM_PARA_API_VERSION;
	/*
	 * We cannot support guests that try to register themselves
	 * with a newer API version than the host supports:
	 */
	if (para_state->guest_version > KVM_PARA_API_VERSION) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	hypercall_gpa = para_state->hypercall_gpa;
	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
	if (is_error_hpa(hypercall_hpa)) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
	vcpu->para_state_page = para_state_page;
	vcpu->para_state_gpa = para_state_gpa;
	vcpu->hypercall_gpa = hypercall_gpa;

	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
	kunmap_atomic(hypercall, KM_USER1);

	para_state->ret = 0;
err_kunmap_skip:
	kunmap_atomic(para_state, KM_USER0);
	return 0;
err_gp:
	return 1;
}
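/*
 * Guest-side registration sketch (illustrative only): per the MSR probe in
 * kvm_set_msr_common() below, a guest enters paravirtual mode by writing
 * the guest-physical address of its kvm_vcpu_para_state page to
 * MSR_KVM_API_MAGIC, having first filled in guest_version and
 * hypercall_gpa; the host reports the outcome in para_state->ret.
 */
#if 0
	para_state->guest_version = KVM_PARA_API_VERSION;
	para_state->hypercall_gpa = hypercall_page_gpa;	/* hypothetical */
	wrmsrl(MSR_KVM_API_MAGIC, para_state_gpa);
	if (para_state->ret == 0)
		/* host patched the hypercall page; paravirt mode is on */;
#endif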
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}
#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	/*
	 * This is the 'probe whether the host is KVM' logic:
	 */
	case MSR_KVM_API_MAGIC:
		return vcpu_register_para(vcpu, data);
	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	vcpu_put(vcpu);
	cond_resched();
	vcpu_load(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_resched);

void load_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(load_msrs);

void save_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(save_msrs);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_arch_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	int bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	kvm_arch_ops->vcpu_put(vcpu);
	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		kvm_arch_ops->vcpu_load(vcpu);
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	kvm_arch_ops->vcpu_load(vcpu);
	free_pio_guest_pages(vcpu);
	return 0;
}
static int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_arch_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_arch_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_arch_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	if (!io->count)
		kvm_arch_ops->skip_emulated_instruction(vcpu);
	return 0;
}
int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i;
	int nr_pages = 1;
	struct page *page;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;
	vcpu->pio.count = count;
	vcpu->pio.cur_count = count;
	vcpu->pio.size = size;
	vcpu->pio.in = in;
	vcpu->pio.string = string;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!string) {
		kvm_arch_ops->cache_regs(vcpu);
		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}

	if (!count) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	now = min(count, PAGE_SIZE / size);

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		printk(KERN_ERR "kvm: guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	for (i = 0; i < nr_pages; ++i) {
		spin_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		spin_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	if (!vcpu->pio.in)
		return pio_copy_data(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_setup_pio);
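/*
 * Parameter mapping example (informative): a guest "rep outsb" of 100 bytes
 * from ds:esi arrives here as in=0, size=1, count=100, string=1, down=0
 * (EFLAGS.DF clear), address=ds.base+esi, rep=1.  Only the units that fit
 * in the remainder of the page are queued per userspace round trip; since
 * the instruction is skipped only once io->count reaches zero in
 * complete_pio() above, the guest re-executes it for the rest.
 */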
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	vcpu->cr8 = kvm_run->cr8;

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			kvm_run->exit_reason = KVM_EXIT_MMIO;
			r = 0;
			goto out;
		}
	}

	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_arch_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_arch_ops->decache_regs(vcpu);
	}

	r = kvm_arch_ops->run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_arch_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_arch_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_arch_ops->set_rflags(vcpu, regs->rflags);

	kvm_arch_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}
static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->get_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	struct descriptor_table dt;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_arch_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_arch_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = vcpu->cr8;
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = vcpu->apic_base;

	memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
	       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}
static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->set_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_arch_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_arch_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;

	kvm_arch_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
	       sizeof vcpu->irq_pending);
	vcpu->irq_summary = 0;
	for (i = 0; i < NR_IRQ_WORDS; ++i)
		if (vcpu->irq_pending[i])
			__set_bit(i, &vcpu->irq_summary);

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}
/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
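/*
 * Userspace sketch for the vcpu KVM_GET_MSRS path (illustrative only;
 * assumes a vcpu fd and this era's variable-size kvm_msrs layout):
 */
#if 0
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entries[1];
	} msrs = {
		.header.nmsrs     = 1,
		.entries[0].index = MSR_IA32_SYSENTER_CS,
	};

	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		/* fewer msrs were read than requested */;
#endif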
/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	spin_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	spin_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	atomic_inc(&vcpu->kvm->filp->f_count);
	inode = kvmfs_inode(&kvm_vcpu_fops);
	if (IS_ERR(inode)) {
		r = PTR_ERR(inode);
		goto out1;
	}

	file = kvmfs_file(inode, vcpu);
	if (IS_ERR(file)) {
		r = PTR_ERR(file);
		goto out2;
	}

	r = get_unused_fd();
	if (r < 0)
		goto out3;
	fd = r;
	fd_install(fd, file);

	return fd;

out3:
	fput(file);
out2:
	iput(inode);
out1:
	fput(vcpu->kvm->filp);
	return r;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;
	struct page *page;

	r = -EINVAL;
	if (!valid_vcpu(n))
		goto out;

	vcpu = &kvm->vcpus[n];

	mutex_lock(&vcpu->mutex);

	if (vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return -EEXIST;
	}

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	r = -ENOMEM;
	if (!page)
		goto out_unlock;
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_free_run;
	vcpu->pio_data = page_address(page);

	vcpu->host_fx_image = (char *)ALIGN((hva_t)vcpu->fx_buf,
					    FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

	r = kvm_arch_ops->vcpu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	if (r >= 0)
		r = kvm_arch_ops->vcpu_setup(vcpu);
	vcpu_put(vcpu);

	if (r < 0)
		goto out_free_vcpus;

	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	return r;

out_free_vcpus:
	kvm_free_vcpu(vcpu);
out_free_run:
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
out_unlock:
	mutex_unlock(&vcpu->mutex);
out:
	return r;
}
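/*
 * End-to-end creation sketch from userspace (illustrative only): the
 * returned vcpu fd is a kvmfs file whose first page is the kvm_run
 * structure and whose KVM_PIO_PAGE_OFFSET page carries PIO data, matching
 * kvm_vcpu_nopage() above and KVM_GET_VCPU_MMAP_SIZE below.
 */
#if 0
	int kvm_fd  = open("/dev/kvm", O_RDWR);
	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	struct kvm_run *run = mmap(NULL, 2 * PAGE_SIZE,
				   PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);
#endif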
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	/* bit 20 of cpuid 0x80000001.edx is the NX capability bit */
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	default:
		;
	}
out:
	return r;
}
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	inode = kvmfs_inode(&kvm_vm_fops);
	if (IS_ERR(inode)) {
		r = PTR_ERR(inode);
		goto out1;
	}

	kvm = kvm_create_vm();
	if (IS_ERR(kvm)) {
		r = PTR_ERR(kvm);
		goto out2;
	}

	file = kvmfs_file(inode, kvm);
	if (IS_ERR(file)) {
		r = PTR_ERR(file);
		goto out3;
	}
	kvm->filp = file;

	r = get_unused_fd();
	if (r < 0)
		goto out4;
	fd = r;
	fd_install(fd, file);

	return fd;

out4:
	fput(file);
out3:
	kvm_destroy_vm(kvm);
out2:
	iput(inode);
out1:
	return r;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		/*
		 * No extensions defined at present.
		 */
		r = 0;
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		;
	}
out:
	return r;
}
static struct file_operations kvm_chardev_ops = {
	.open           = kvm_dev_open,
	.release        = kvm_dev_release,
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &vm->vcpus[i];
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_arch_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		decache_vcpus_on_cpu(cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
					 NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
					 NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &kvm->vcpus[i];
			total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

static void stat_set(void *offset, u64 val)
{
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}
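/*
 * The counters land under debugfs, e.g. /sys/kernel/debug/kvm/exits
 * (the exact path depends on where debugfs is mounted); reading a file
 * sums the corresponding kvm_vcpu stat across all VMs via stat_get().
 */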
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	decache_vcpus_on_cpu(raw_smp_processor_id());
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
hpa_t bad_page_address;

static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
}

static struct file_system_type kvm_fs_type = {
	.name    = "kvmfs",
	.get_sb  = kvmfs_get_sb,
	.kill_sb = kill_anon_super,
};
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		goto out;

	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
out:
	kvm_arch_ops = NULL;
	return r;
}
void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	r = register_filesystem(&kvm_fs_type);
	if (r)
		goto out3;

	kvmfs_mnt = kern_mount(&kvm_fs_type);
	r = PTR_ERR(kvmfs_mnt);
	if (IS_ERR(kvmfs_mnt))
		goto out2;

	kvm_init_debug();
	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	mntput(kvmfs_mnt);
out2:
	unregister_filesystem(&kvm_fs_type);
out3:
	kvm_mmu_module_exit();
out4:
	return r;
}
static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	mntput(kvmfs_mnt);
	unregister_filesystem(&kvm_fs_type);
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);