/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;
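
/*
 * Illustration (added; not in the original header): a guest access is
 * resolved in stages, each mapping one address type onto the next:
 *
 *	gva -(guest page tables)-> gpa -(memslots)-> hva
 *	hva -(host page tables)->  hpa
 *
 * The frame-number forms are the physical addresses shifted right by
 * PAGE_SHIFT; see gfn_to_gpa() near the end of this header.
 */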

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
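
/*
 * Added note: the anonymous bitfield overlays 'word', so two roles can
 * be compared (or hashed) as a single integer; a sketch of the pattern,
 * assuming 'page' is a struct kvm_mmu_page:
 *
 *	if (page->role.word == role.word)
 *		... same levels, quadrant, and access bits ...
 */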

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	u64 *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;	       /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};

extern struct kmem_cache *kvm_vcpu_cache;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
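
/*
 * Added sketch (hypothetical; assumes the arch-specific vcpu embeds
 * this structure as vcpu->mmu): callers dispatch through the function
 * table above instead of testing the current paging mode directly:
 *
 *	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 */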

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
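
/*
 * Added sketch (hypothetical helper; the real cache fill/alloc routines
 * live in the mmu code, not in this header): once the cache is topped
 * up before fault handling begins, taking an object cannot fail.
 */
static inline void *mmu_memory_cache_alloc_sketch(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);		/* cache was pre-filled */
	return mc->objects[--mc->nobjs];
}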

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
};

struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
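
/*
 * Added sketch of the linear search the comment above alludes to; the
 * in-tree kvm_io_bus_find_dev() lives in kvm_main.c, so this
 * illustrative copy uses a different name.
 */
static inline struct kvm_io_device *
kvm_io_bus_find_dev_sketch(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (kvm_iodevice_inrange(bus->devs[i], addr))
			return bus->devs[i];
	return NULL;
}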

#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_needed;		\
	int mmio_read_completed;	\
	int mmio_is_write;		\
	int mmio_size;			\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#else
#define KVM_VCPU_MMIO

#endif

#define KVM_VCPU_COMM					\
	struct kvm *kvm;				\
	struct preempt_notifier preempt_notifier;	\
	int vcpu_id;					\
	struct mutex mutex;				\
	int cpu;					\
	struct kvm_run *run;				\
	int guest_mode;					\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int fpu_active;					\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	int sigset_active;				\
	sigset_t sigset;				\
	struct kvm_vcpu_stat stat;			\
	KVM_VCPU_MMIO
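
/*
 * Added illustration (assumed usage, based on the macro above): each
 * architecture builds its own struct kvm_vcpu by placing the common
 * members first:
 *
 *	struct kvm_vcpu {
 *		KVM_VCPU_COMM
 *		... arch-specific fields ...
 *	};
 */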

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 remote_tlb_flush;
};

struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	struct mm_struct *mm; /* userspace tied to this vm */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
};
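
/*
 * Added illustration (hash function assumed; the exact one lives in the
 * mmu code): shadow pages are looked up by hashing the guest frame
 * number into mmu_page_hash and walking the bucket's hash_link chain:
 *
 *	struct hlist_head *bucket =
 *		&kvm->mmu_page_hash[gfn % KVM_NUM_MMU_PAGES];
 */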

static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != NULL;
}

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
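
/*
 * Example use (illustrative): report a guest action we do not emulate;
 * printk_ratelimit() keeps a misbehaving guest from flooding the log:
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */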

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
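
/*
 * Added illustration: errors are encoded in the hpa's most significant
 * bit, so any value with HPA_ERR_MASK set tests as an error:
 *
 *	hpa_t bad = hpa | HPA_ERR_MASK;
 *	is_error_hpa(bad);		returns nonzero
 */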

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
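
/*
 * Added sketch (hypothetical helper): the whole-range accessors above
 * split the copy at page boundaries internally, so a caller can fetch a
 * guest value that straddles pages with one call.
 */
static inline int kvm_read_guest_u64_sketch(struct kvm *kvm, gpa_t gpa, u64 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}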

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
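
/*
 * Typical use (illustrative): arch code brackets the low-level guest
 * entry with these so time spent in the guest is accounted to the vcpu
 * task (PF_VCPU) rather than to the host kernel:
 *
 *	kvm_guest_enter();
 *	...run the vcpu in guest mode...
 *	kvm_guest_exit();
 */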

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
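
/*
 * Added for symmetry (hypothetical helper, not declared elsewhere in
 * this header): the inverse of gfn_to_gpa().
 */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}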

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];