1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  *
8  * Authors:
9  *   Avi Kivity   <avi@qumranet.com>
10  *   Yaniv Kamay  <yaniv@qumranet.com>
11  *
12  * This work is licensed under the terms of the GNU GPL, version 2.  See
13  * the COPYING file in the top-level directory.
14  *
15  */
16
17 #include "kvm.h"
18 #include "x86.h"
19 #include "x86_emulate.h"
20 #include "segment_descriptor.h"
21 #include "irq.h"
22
23 #include <linux/kvm.h>
24 #include <linux/fs.h>
25 #include <linux/vmalloc.h>
26 #include <linux/module.h>
27 #include <linux/mman.h>
28
29 #include <asm/uaccess.h>
30 #include <asm/msr.h>
31
32 #define MAX_IO_MSRS 256
33 #define CR0_RESERVED_BITS                                               \
34         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
35                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
36                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
37 #define CR4_RESERVED_BITS                                               \
38         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
39                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
40                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
41                           | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
42
43 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
44 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
45
46 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
47 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
48
49 struct kvm_x86_ops *kvm_x86_ops;
50
51 struct kvm_stats_debugfs_item debugfs_entries[] = {
52         { "pf_fixed", VCPU_STAT(pf_fixed) },
53         { "pf_guest", VCPU_STAT(pf_guest) },
54         { "tlb_flush", VCPU_STAT(tlb_flush) },
55         { "invlpg", VCPU_STAT(invlpg) },
56         { "exits", VCPU_STAT(exits) },
57         { "io_exits", VCPU_STAT(io_exits) },
58         { "mmio_exits", VCPU_STAT(mmio_exits) },
59         { "signal_exits", VCPU_STAT(signal_exits) },
60         { "irq_window", VCPU_STAT(irq_window_exits) },
61         { "halt_exits", VCPU_STAT(halt_exits) },
62         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63         { "request_irq", VCPU_STAT(request_irq_exits) },
64         { "irq_exits", VCPU_STAT(irq_exits) },
65         { "host_state_reload", VCPU_STAT(host_state_reload) },
66         { "efer_reload", VCPU_STAT(efer_reload) },
67         { "fpu_reload", VCPU_STAT(fpu_reload) },
68         { "insn_emulation", VCPU_STAT(insn_emulation) },
69         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
70         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
71         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
72         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
73         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
74         { "mmu_flooded", VM_STAT(mmu_flooded) },
75         { "mmu_recycled", VM_STAT(mmu_recycled) },
76         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
77         { NULL }
78 };
79
80
81 unsigned long segment_base(u16 selector)
82 {
83         struct descriptor_table gdt;
84         struct segment_descriptor *d;
85         unsigned long table_base;
86         unsigned long v;
87
88         if (selector == 0)
89                 return 0;
90
91         asm("sgdt %0" : "=m"(gdt));
92         table_base = gdt.base;
93
94         if (selector & 4) {           /* from ldt */
95                 u16 ldt_selector;
96
97                 asm("sldt %0" : "=g"(ldt_selector));
98                 table_base = segment_base(ldt_selector);
99         }
100         d = (struct segment_descriptor *)(table_base + (selector & ~7));
101         v = d->base_low | ((unsigned long)d->base_mid << 16) |
102                 ((unsigned long)d->base_high << 24);
103 #ifdef CONFIG_X86_64
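            /* in long mode, LDT and TSS descriptors (types 2, 9, 11) are 16 bytes; the extra dword holds base bits 63:32 */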
104         if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
105                 v |= ((unsigned long) \
106                       ((struct segment_descriptor_64 *)d)->base_higher) << 32;
107 #endif
108         return v;
109 }
110 EXPORT_SYMBOL_GPL(segment_base);
111
112 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
113 {
114         if (irqchip_in_kernel(vcpu->kvm))
115                 return vcpu->apic_base;
116         else
117                 return vcpu->apic_base;
118 }
119 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
120
121 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
122 {
123         /* TODO: reserved bits check */
124         if (irqchip_in_kernel(vcpu->kvm))
125                 kvm_lapic_set_base(vcpu, data);
126         else
127                 vcpu->apic_base = data;
128 }
129 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
130
131 static void inject_gp(struct kvm_vcpu *vcpu)
132 {
133         kvm_x86_ops->inject_gp(vcpu, 0);
134 }
135
136 /*
137  * Load the pae pdptrs.  Return true if they are all valid.
138  */
139 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
140 {
141         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
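            /* the pdpt is 32-byte aligned; turn its offset within the page into an index of 8-byte entries */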
142         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
143         int i;
144         int ret;
145         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
146
147         mutex_lock(&vcpu->kvm->lock);
148         ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
149                                   offset * sizeof(u64), sizeof(pdpte));
150         if (ret < 0) {
151                 ret = 0;
152                 goto out;
153         }
154         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
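                    /* a present pdpte must not have reserved bits set (bits 1-2, 5-8, or above bit 35) */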
155                 if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
156                         ret = 0;
157                         goto out;
158                 }
159         }
160         ret = 1;
161
162         memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
163 out:
164         mutex_unlock(&vcpu->kvm->lock);
165
166         return ret;
167 }
168
169 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
170 {
171         u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
172         bool changed = true;
173         int r;
174
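            /* pdptrs are only loaded for 32-bit PAE paging */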
175         if (is_long_mode(vcpu) || !is_pae(vcpu))
176                 return false;
177
178         mutex_lock(&vcpu->kvm->lock);
179         r = kvm_read_guest(vcpu->kvm, vcpu->cr3 & ~31u, pdpte, sizeof(pdpte));
180         if (r < 0)
181                 goto out;
182         changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0;
183 out:
184         mutex_unlock(&vcpu->kvm->lock);
185
186         return changed;
187 }
188
189 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
190 {
191         if (cr0 & CR0_RESERVED_BITS) {
192                 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
193                        cr0, vcpu->cr0);
194                 inject_gp(vcpu);
195                 return;
196         }
197
198         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
199                 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
200                 inject_gp(vcpu);
201                 return;
202         }
203
204         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
205                 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
206                        "and a clear PE flag\n");
207                 inject_gp(vcpu);
208                 return;
209         }
210
211         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
212 #ifdef CONFIG_X86_64
213                 if ((vcpu->shadow_efer & EFER_LME)) {
214                         int cs_db, cs_l;
215
216                         if (!is_pae(vcpu)) {
217                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
218                                        "in long mode while PAE is disabled\n");
219                                 inject_gp(vcpu);
220                                 return;
221                         }
222                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
223                         if (cs_l) {
224                                 printk(KERN_DEBUG "set_cr0: #GP, start paging "
225                                        "in long mode while CS.L == 1\n");
226                                 inject_gp(vcpu);
227                                 return;
228
229                         }
230                 } else
231 #endif
232                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
233                         printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
234                                "reserved bits\n");
235                         inject_gp(vcpu);
236                         return;
237                 }
238
239         }
240
241         kvm_x86_ops->set_cr0(vcpu, cr0);
242         vcpu->cr0 = cr0;
243
244         mutex_lock(&vcpu->kvm->lock);
245         kvm_mmu_reset_context(vcpu);
246         mutex_unlock(&vcpu->kvm->lock);
247         return;
248 }
249 EXPORT_SYMBOL_GPL(set_cr0);
250
251 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
252 {
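            /* lmsw can only modify the low four cr0 bits: PE, MP, EM and TS */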
253         set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
254 }
255 EXPORT_SYMBOL_GPL(lmsw);
256
257 void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
258 {
259         if (cr4 & CR4_RESERVED_BITS) {
260                 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
261                 inject_gp(vcpu);
262                 return;
263         }
264
265         if (is_long_mode(vcpu)) {
266                 if (!(cr4 & X86_CR4_PAE)) {
267                         printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
268                                "in long mode\n");
269                         inject_gp(vcpu);
270                         return;
271                 }
272         } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
273                    && !load_pdptrs(vcpu, vcpu->cr3)) {
274                 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
275                 inject_gp(vcpu);
276                 return;
277         }
278
279         if (cr4 & X86_CR4_VMXE) {
280                 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
281                 inject_gp(vcpu);
282                 return;
283         }
284         kvm_x86_ops->set_cr4(vcpu, cr4);
285         vcpu->cr4 = cr4;
286         mutex_lock(&vcpu->kvm->lock);
287         kvm_mmu_reset_context(vcpu);
288         mutex_unlock(&vcpu->kvm->lock);
289 }
290 EXPORT_SYMBOL_GPL(set_cr4);
291
292 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
293 {
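            /* an unchanged cr3 with unchanged pdptrs needs only a tlb flush, not a full mmu reload */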
294         if (cr3 == vcpu->cr3 && !pdptrs_changed(vcpu)) {
295                 kvm_mmu_flush_tlb(vcpu);
296                 return;
297         }
298
299         if (is_long_mode(vcpu)) {
300                 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
301                         printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
302                         inject_gp(vcpu);
303                         return;
304                 }
305         } else {
306                 if (is_pae(vcpu)) {
307                         if (cr3 & CR3_PAE_RESERVED_BITS) {
308                                 printk(KERN_DEBUG
309                                        "set_cr3: #GP, reserved bits\n");
310                                 inject_gp(vcpu);
311                                 return;
312                         }
313                         if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
314                                 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
315                                        "reserved bits\n");
316                                 inject_gp(vcpu);
317                                 return;
318                         }
319                 }
320                 /*
321                  * We don't check reserved bits in nonpae mode, because
322                  * this isn't enforced, and VMware depends on this.
323                  */
324         }
325
326         mutex_lock(&vcpu->kvm->lock);
327         /*
328          * Does the new cr3 value map to physical memory? (Note, we
329          * catch an invalid cr3 even in real-mode, because it would
330          * cause trouble later on when we turn on paging anyway.)
331          *
332          * A real CPU would silently accept an invalid cr3 and would
333          * attempt to use it - with largely undefined (and often hard
334          * to debug) behavior on the guest side.
335          */
336         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
337                 inject_gp(vcpu);
338         else {
339                 vcpu->cr3 = cr3;
340                 vcpu->mmu.new_cr3(vcpu);
341         }
342         mutex_unlock(&vcpu->kvm->lock);
343 }
344 EXPORT_SYMBOL_GPL(set_cr3);
345
346 void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
347 {
348         if (cr8 & CR8_RESERVED_BITS) {
349                 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
350                 inject_gp(vcpu);
351                 return;
352         }
353         if (irqchip_in_kernel(vcpu->kvm))
354                 kvm_lapic_set_tpr(vcpu, cr8);
355         else
356                 vcpu->cr8 = cr8;
357 }
358 EXPORT_SYMBOL_GPL(set_cr8);
359
360 unsigned long get_cr8(struct kvm_vcpu *vcpu)
361 {
362         if (irqchip_in_kernel(vcpu->kvm))
363                 return kvm_lapic_get_cr8(vcpu);
364         else
365                 return vcpu->cr8;
366 }
367 EXPORT_SYMBOL_GPL(get_cr8);
368
369 /*
370  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
371  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
372  *
373  * This list is modified at module load time to reflect the
374  * capabilities of the host cpu.
375  */
376 static u32 msrs_to_save[] = {
377         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
378         MSR_K6_STAR,
379 #ifdef CONFIG_X86_64
380         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
381 #endif
382         MSR_IA32_TIME_STAMP_COUNTER,
383 };
384
385 static unsigned num_msrs_to_save;
386
387 static u32 emulated_msrs[] = {
388         MSR_IA32_MISC_ENABLE,
389 };
390
391 #ifdef CONFIG_X86_64
392
393 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
394 {
395         if (efer & EFER_RESERVED_BITS) {
396                 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
397                        efer);
398                 inject_gp(vcpu);
399                 return;
400         }
401
402         if (is_paging(vcpu)
403             && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
404                 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
405                 inject_gp(vcpu);
406                 return;
407         }
408
409         kvm_x86_ops->set_efer(vcpu, efer);
410
411         efer &= ~EFER_LMA;
412         efer |= vcpu->shadow_efer & EFER_LMA;
413
414         vcpu->shadow_efer = efer;
415 }
416
417 #endif
418
419 /*
420  * Writes msr value into the appropriate "register".
421  * Returns 0 on success, non-0 otherwise.
422  * Assumes vcpu_load() was already called.
423  */
424 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
425 {
426         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
427 }
428
429 /*
430  * Adapt set_msr() to msr_io()'s calling convention
431  */
432 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
433 {
434         return kvm_set_msr(vcpu, index, *data);
435 }
436
437
438 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
439 {
440         switch (msr) {
441 #ifdef CONFIG_X86_64
442         case MSR_EFER:
443                 set_efer(vcpu, data);
444                 break;
445 #endif
446         case MSR_IA32_MC0_STATUS:
447                 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
448                        __FUNCTION__, data);
449                 break;
450         case MSR_IA32_MCG_STATUS:
451                 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
452                         __FUNCTION__, data);
453                 break;
454         case MSR_IA32_UCODE_REV:
455         case MSR_IA32_UCODE_WRITE:
456         case 0x200 ... 0x2ff: /* MTRRs */
457                 break;
458         case MSR_IA32_APICBASE:
459                 kvm_set_apic_base(vcpu, data);
460                 break;
461         case MSR_IA32_MISC_ENABLE:
462                 vcpu->ia32_misc_enable_msr = data;
463                 break;
464         default:
465                 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
466                 return 1;
467         }
468         return 0;
469 }
470 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
471
472
473 /*
474  * Reads an msr value (of 'msr_index') into 'pdata'.
475  * Returns 0 on success, non-0 otherwise.
476  * Assumes vcpu_load() was already called.
477  */
478 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
479 {
480         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
481 }
482
483 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
484 {
485         u64 data;
486
487         switch (msr) {
488         case 0xc0010010: /* SYSCFG */
489         case 0xc0010015: /* HWCR */
490         case MSR_IA32_PLATFORM_ID:
491         case MSR_IA32_P5_MC_ADDR:
492         case MSR_IA32_P5_MC_TYPE:
493         case MSR_IA32_MC0_CTL:
494         case MSR_IA32_MCG_STATUS:
495         case MSR_IA32_MCG_CAP:
496         case MSR_IA32_MC0_MISC:
497         case MSR_IA32_MC0_MISC+4:
498         case MSR_IA32_MC0_MISC+8:
499         case MSR_IA32_MC0_MISC+12:
500         case MSR_IA32_MC0_MISC+16:
501         case MSR_IA32_UCODE_REV:
502         case MSR_IA32_PERF_STATUS:
503         case MSR_IA32_EBL_CR_POWERON:
504                 /* MTRR registers */
505         case 0xfe:
506         case 0x200 ... 0x2ff:
507                 data = 0;
508                 break;
509         case 0xcd: /* fsb frequency */
510                 data = 3;
511                 break;
512         case MSR_IA32_APICBASE:
513                 data = kvm_get_apic_base(vcpu);
514                 break;
515         case MSR_IA32_MISC_ENABLE:
516                 data = vcpu->ia32_misc_enable_msr;
517                 break;
518 #ifdef CONFIG_X86_64
519         case MSR_EFER:
520                 data = vcpu->shadow_efer;
521                 break;
522 #endif
523         default:
524                 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
525                 return 1;
526         }
527         *pdata = data;
528         return 0;
529 }
530 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
531
532 /*
533  * Read or write a bunch of msrs. All parameters are kernel addresses.
534  *
535  * @return number of msrs set successfully.
536  */
537 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
538                     struct kvm_msr_entry *entries,
539                     int (*do_msr)(struct kvm_vcpu *vcpu,
540                                   unsigned index, u64 *data))
541 {
542         int i;
543
544         vcpu_load(vcpu);
545
546         for (i = 0; i < msrs->nmsrs; ++i)
547                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
548                         break;
549
550         vcpu_put(vcpu);
551
552         return i;
553 }
554
555 /*
556  * Read or write a bunch of msrs. Parameters are user addresses.
557  *
558  * @return number of msrs set successfully.
559  */
560 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
561                   int (*do_msr)(struct kvm_vcpu *vcpu,
562                                 unsigned index, u64 *data),
563                   int writeback)
564 {
565         struct kvm_msrs msrs;
566         struct kvm_msr_entry *entries;
567         int r, n;
568         unsigned size;
569
570         r = -EFAULT;
571         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
572                 goto out;
573
574         r = -E2BIG;
575         if (msrs.nmsrs >= MAX_IO_MSRS)
576                 goto out;
577
578         r = -ENOMEM;
579         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
580         entries = vmalloc(size);
581         if (!entries)
582                 goto out;
583
584         r = -EFAULT;
585         if (copy_from_user(entries, user_msrs->entries, size))
586                 goto out_free;
587
588         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
589         if (r < 0)
590                 goto out_free;
591
592         r = -EFAULT;
593         if (writeback && copy_to_user(user_msrs->entries, entries, size))
594                 goto out_free;
595
596         r = n;
597
598 out_free:
599         vfree(entries);
600 out:
601         return r;
602 }
603
604 /*
605  * Make sure that a cpu that is being hot-unplugged does not have any vcpus
606  * cached on it.
607  */
608 void decache_vcpus_on_cpu(int cpu)
609 {
610         struct kvm *vm;
611         struct kvm_vcpu *vcpu;
612         int i;
613
614         spin_lock(&kvm_lock);
615         list_for_each_entry(vm, &vm_list, vm_list)
616                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
617                         vcpu = vm->vcpus[i];
618                         if (!vcpu)
619                                 continue;
620                         /*
621                          * If the vcpu is locked, then it is running on some
622                          * other cpu and therefore it is not cached on the
623                          * cpu in question.
624                          *
625                          * If it's not locked, check the last cpu it executed
626                          * on.
627                          */
628                         if (mutex_trylock(&vcpu->mutex)) {
629                                 if (vcpu->cpu == cpu) {
630                                         kvm_x86_ops->vcpu_decache(vcpu);
631                                         vcpu->cpu = -1;
632                                 }
633                                 mutex_unlock(&vcpu->mutex);
634                         }
635                 }
636         spin_unlock(&kvm_lock);
637 }
638
639 int kvm_dev_ioctl_check_extension(long ext)
640 {
641         int r;
642
643         switch (ext) {
644         case KVM_CAP_IRQCHIP:
645         case KVM_CAP_HLT:
646         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
647         case KVM_CAP_USER_MEMORY:
648         case KVM_CAP_SET_TSS_ADDR:
649         case KVM_CAP_EXT_CPUID:
650                 r = 1;
651                 break;
652         default:
653                 r = 0;
654                 break;
655         }
656         return r;
657
658 }
659
660 long kvm_arch_dev_ioctl(struct file *filp,
661                         unsigned int ioctl, unsigned long arg)
662 {
663         void __user *argp = (void __user *)arg;
664         long r;
665
666         switch (ioctl) {
667         case KVM_GET_MSR_INDEX_LIST: {
668                 struct kvm_msr_list __user *user_msr_list = argp;
669                 struct kvm_msr_list msr_list;
670                 unsigned n;
671
672                 r = -EFAULT;
673                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
674                         goto out;
675                 n = msr_list.nmsrs;
676                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
677                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
678                         goto out;
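                    /* n is the capacity userspace reported; it must hold at least the saved msrs */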
679                 r = -E2BIG;
680                 if (n < num_msrs_to_save)
681                         goto out;
682                 r = -EFAULT;
683                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
684                                  num_msrs_to_save * sizeof(u32)))
685                         goto out;
686                 if (copy_to_user(user_msr_list->indices
687                                  + num_msrs_to_save * sizeof(u32),
688                                  &emulated_msrs,
689                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
690                         goto out;
691                 r = 0;
692                 break;
693         }
694         default:
695                 r = -EINVAL;
696         }
697 out:
698         return r;
699 }
700
701 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
702 {
703         kvm_x86_ops->vcpu_load(vcpu, cpu);
704 }
705
706 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
707 {
708         kvm_x86_ops->vcpu_put(vcpu);
709         kvm_put_guest_fpu(vcpu);
710 }
711
712 static int is_efer_nx(void)
713 {
714         u64 efer;
715
716         rdmsrl(MSR_EFER, efer);
717         return efer & EFER_NX;
718 }
719
720 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
721 {
722         int i;
723         struct kvm_cpuid_entry2 *e, *entry;
724
725         entry = NULL;
726         for (i = 0; i < vcpu->cpuid_nent; ++i) {
727                 e = &vcpu->cpuid_entries[i];
728                 if (e->function == 0x80000001) {
729                         entry = e;
730                         break;
731                 }
732         }
733         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
734                 entry->edx &= ~(1 << 20);
735                 printk(KERN_INFO "kvm: guest NX capability removed\n");
736         }
737 }
738
739 /* used when an old userspace process, speaking the legacy kvm_cpuid format, configures a new kernel module */
740 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
741                                     struct kvm_cpuid *cpuid,
742                                     struct kvm_cpuid_entry __user *entries)
743 {
744         int r, i;
745         struct kvm_cpuid_entry *cpuid_entries;
746
747         r = -E2BIG;
748         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
749                 goto out;
750         r = -ENOMEM;
751         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
752         if (!cpuid_entries)
753                 goto out;
754         r = -EFAULT;
755         if (copy_from_user(cpuid_entries, entries,
756                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
757                 goto out_free;
758         for (i = 0; i < cpuid->nent; i++) {
759                 vcpu->cpuid_entries[i].function = cpuid_entries[i].function;
760                 vcpu->cpuid_entries[i].eax = cpuid_entries[i].eax;
761                 vcpu->cpuid_entries[i].ebx = cpuid_entries[i].ebx;
762                 vcpu->cpuid_entries[i].ecx = cpuid_entries[i].ecx;
763                 vcpu->cpuid_entries[i].edx = cpuid_entries[i].edx;
764                 vcpu->cpuid_entries[i].index = 0;
765                 vcpu->cpuid_entries[i].flags = 0;
766                 vcpu->cpuid_entries[i].padding[0] = 0;
767                 vcpu->cpuid_entries[i].padding[1] = 0;
768                 vcpu->cpuid_entries[i].padding[2] = 0;
769         }
770         vcpu->cpuid_nent = cpuid->nent;
771         cpuid_fix_nx_cap(vcpu);
772         r = 0;
773
774 out_free:
775         vfree(cpuid_entries);
776 out:
777         return r;
778 }
779
780 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
781                                     struct kvm_cpuid2 *cpuid,
782                                     struct kvm_cpuid_entry2 __user *entries)
783 {
784         int r;
785
786         r = -E2BIG;
787         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
788                 goto out;
789         r = -EFAULT;
790         if (copy_from_user(&vcpu->cpuid_entries, entries,
791                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
792                 goto out;
793         vcpu->cpuid_nent = cpuid->nent;
794         return 0;
795
796 out:
797         return r;
798 }
799
800 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
801                                     struct kvm_cpuid2 *cpuid,
802                                     struct kvm_cpuid_entry2 __user *entries)
803 {
804         int r;
805
806         r = -E2BIG;
807         if (cpuid->nent < vcpu->cpuid_nent)
808                 goto out;
809         r = -EFAULT;
810         if (copy_to_user(entries, &vcpu->cpuid_entries,
811                            vcpu->cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
812                 goto out;
813         return 0;
814
815 out:
816         cpuid->nent = vcpu->cpuid_nent;
817         return r;
818 }
819
820 static inline u32 bit(int bitno)
821 {
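            /* X86_FEATURE_* values encode word*32+bit; keep only the bit position within its 32-bit word */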
822         return 1 << (bitno & 31);
823 }
824
825 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
826                           u32 index)
827 {
828         entry->function = function;
829         entry->index = index;
830         cpuid_count(entry->function, entry->index,
831                 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
832         entry->flags = 0;
833 }
834
835 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
836                          u32 index, int *nent, int maxnent)
837 {
838         const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
839                 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
840                 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
841                 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
842                 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
843                 bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
844                 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
845                 bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
846                 bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
847                 bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
848         const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
849                 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
850                 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
851                 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
852                 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
853                 bit(X86_FEATURE_PGE) |
854                 bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
855                 bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
856                 bit(X86_FEATURE_SYSCALL) |
857                 (bit(X86_FEATURE_NX) && is_efer_nx()) |
858 #ifdef CONFIG_X86_64
859                 bit(X86_FEATURE_LM) |
860 #endif
861                 bit(X86_FEATURE_MMXEXT) |
862                 bit(X86_FEATURE_3DNOWEXT) |
863                 bit(X86_FEATURE_3DNOW);
864         const u32 kvm_supported_word3_x86_features =
865                 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
866         const u32 kvm_supported_word6_x86_features =
867                 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);
868
869         /* all function 2 cpuid_count() calls should be made on the same cpu */
870         get_cpu();
871         do_cpuid_1_ent(entry, function, index);
872         ++*nent;
873
874         switch (function) {
875         case 0:
876                 entry->eax = min(entry->eax, (u32)0xb);
877                 break;
878         case 1:
879                 entry->edx &= kvm_supported_word0_x86_features;
880                 entry->ecx &= kvm_supported_word3_x86_features;
881                 break;
882         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
883          * may return different values. This forces us to get_cpu() before
884          * issuing the first command, and also to emulate this annoying behavior
885          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
886         case 2: {
887                 int t, times = entry->eax & 0xff;
888
889                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
890                 for (t = 1; t < times && *nent < maxnent; ++t) {
891                         do_cpuid_1_ent(&entry[t], function, 0);
892                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
893                         ++*nent;
894                 }
895                 break;
896         }
897         /* functions 4 and 0xb have an additional index. */
898         case 4: {
899                 int index, cache_type;
900
901                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
902                 /* read more entries until cache_type is zero */
903                 for (index = 1; *nent < maxnent; ++index) {
904                         cache_type = entry[index - 1].eax & 0x1f;
905                         if (!cache_type)
906                                 break;
907                         do_cpuid_1_ent(&entry[index], function, index);
908                         entry[index].flags |=
909                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
910                         ++*nent;
911                 }
912                 break;
913         }
914         case 0xb: {
915                 int index, level_type;
916
917                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
918                 /* read more entries until level_type is zero */
919                 for (index = 1; *nent < maxnent; ++index) {
920                         level_type = entry[index - 1].ecx & 0xff;
921                         if (!level_type)
922                                 break;
923                         do_cpuid_1_ent(&entry[index], function, index);
924                         entry[index].flags |=
925                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
926                         ++*nent;
927                 }
928                 break;
929         }
930         case 0x80000000:
931                 entry->eax = min(entry->eax, 0x8000001a);
932                 break;
933         case 0x80000001:
934                 entry->edx &= kvm_supported_word1_x86_features;
935                 entry->ecx &= kvm_supported_word6_x86_features;
936                 break;
937         }
938         put_cpu();
939 }
940
941 static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
942                                     struct kvm_cpuid2 *cpuid,
943                                     struct kvm_cpuid_entry2 __user *entries)
944 {
945         struct kvm_cpuid_entry2 *cpuid_entries;
946         int limit, nent = 0, r = -E2BIG;
947         u32 func;
948
949         if (cpuid->nent < 1)
950                 goto out;
951         r = -ENOMEM;
952         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
953         if (!cpuid_entries)
954                 goto out;
955
956         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
957         limit = cpuid_entries[0].eax;
958         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
959                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
960                                 &nent, cpuid->nent);
961         r = -E2BIG;
962         if (nent >= cpuid->nent)
963                 goto out_free;
964
965         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
966         limit = cpuid_entries[nent - 1].eax;
967         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
968                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
969                                &nent, cpuid->nent);
970         r = -EFAULT;
971         if (copy_to_user(entries, cpuid_entries,
972                         nent * sizeof(struct kvm_cpuid_entry2)))
973                 goto out_free;
974         cpuid->nent = nent;
975         r = 0;
976
977 out_free:
978         vfree(cpuid_entries);
979 out:
980         return r;
981 }
982
983 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
984                                     struct kvm_lapic_state *s)
985 {
986         vcpu_load(vcpu);
987         memcpy(s->regs, vcpu->apic->regs, sizeof *s);
988         vcpu_put(vcpu);
989
990         return 0;
991 }
992
993 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
994                                     struct kvm_lapic_state *s)
995 {
996         vcpu_load(vcpu);
997         memcpy(vcpu->apic->regs, s->regs, sizeof *s);
998         kvm_apic_post_state_restore(vcpu);
999         vcpu_put(vcpu);
1000
1001         return 0;
1002 }
1003
1004 long kvm_arch_vcpu_ioctl(struct file *filp,
1005                          unsigned int ioctl, unsigned long arg)
1006 {
1007         struct kvm_vcpu *vcpu = filp->private_data;
1008         void __user *argp = (void __user *)arg;
1009         int r;
1010
1011         switch (ioctl) {
1012         case KVM_GET_LAPIC: {
1013                 struct kvm_lapic_state lapic;
1014
1015                 memset(&lapic, 0, sizeof lapic);
1016                 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
1017                 if (r)
1018                         goto out;
1019                 r = -EFAULT;
1020                 if (copy_to_user(argp, &lapic, sizeof lapic))
1021                         goto out;
1022                 r = 0;
1023                 break;
1024         }
1025         case KVM_SET_LAPIC: {
1026                 struct kvm_lapic_state lapic;
1027
1028                 r = -EFAULT;
1029                 if (copy_from_user(&lapic, argp, sizeof lapic))
1030                         goto out;
1031                 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
1032                 if (r)
1033                         goto out;
1034                 r = 0;
1035                 break;
1036         }
1037         case KVM_SET_CPUID: {
1038                 struct kvm_cpuid __user *cpuid_arg = argp;
1039                 struct kvm_cpuid cpuid;
1040
1041                 r = -EFAULT;
1042                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1043                         goto out;
1044                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
1045                 if (r)
1046                         goto out;
1047                 break;
1048         }
1049         case KVM_SET_CPUID2: {
1050                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1051                 struct kvm_cpuid2 cpuid;
1052
1053                 r = -EFAULT;
1054                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1055                         goto out;
1056                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1057                                 cpuid_arg->entries);
1058                 if (r)
1059                         goto out;
1060                 break;
1061         }
1062         case KVM_GET_CPUID2: {
1063                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1064                 struct kvm_cpuid2 cpuid;
1065
1066                 r = -EFAULT;
1067                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1068                         goto out;
1069                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1070                                 cpuid_arg->entries);
1071                 if (r)
1072                         goto out;
1073                 r = -EFAULT;
1074                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1075                         goto out;
1076                 r = 0;
1077                 break;
1078         }
1079         case KVM_GET_MSRS:
1080                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
1081                 break;
1082         case KVM_SET_MSRS:
1083                 r = msr_io(vcpu, argp, do_set_msr, 0);
1084                 break;
1085         default:
1086                 r = -EINVAL;
1087         }
1088 out:
1089         return r;
1090 }
1091
1092 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1093 {
1094         int ret;
1095
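             /* the tss occupies three pages; reject a base whose pages would wrap past the 4GB boundary */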
1096         if (addr > (unsigned int)(-3 * PAGE_SIZE))
1097                 return -1;
1098         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
1099         return ret;
1100 }
1101
1102 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1103                                           u32 kvm_nr_mmu_pages)
1104 {
1105         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1106                 return -EINVAL;
1107
1108         mutex_lock(&kvm->lock);
1109
1110         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
1111         kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
1112
1113         mutex_unlock(&kvm->lock);
1114         return 0;
1115 }
1116
1117 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1118 {
1119         return kvm->n_alloc_mmu_pages;
1120 }
1121
1122 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1123 {
1124         int i;
1125         struct kvm_mem_alias *alias;
1126
1127         for (i = 0; i < kvm->naliases; ++i) {
1128                 alias = &kvm->aliases[i];
1129                 if (gfn >= alias->base_gfn
1130                     && gfn < alias->base_gfn + alias->npages)
1131                         return alias->target_gfn + gfn - alias->base_gfn;
1132         }
1133         return gfn;
1134 }
1135
1136 /*
1137  * Set a new alias region.  Aliases map a portion of physical memory into
1138  * another portion.  This is useful for memory windows, for example the PC
1139  * VGA region.
1140  */
1141 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1142                                          struct kvm_memory_alias *alias)
1143 {
1144         int r, n;
1145         struct kvm_mem_alias *p;
1146
1147         r = -EINVAL;
1148         /* General sanity checks */
1149         if (alias->memory_size & (PAGE_SIZE - 1))
1150                 goto out;
1151         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1152                 goto out;
1153         if (alias->slot >= KVM_ALIAS_SLOTS)
1154                 goto out;
1155         if (alias->guest_phys_addr + alias->memory_size
1156             < alias->guest_phys_addr)
1157                 goto out;
1158         if (alias->target_phys_addr + alias->memory_size
1159             < alias->target_phys_addr)
1160                 goto out;
1161
1162         mutex_lock(&kvm->lock);
1163
1164         p = &kvm->aliases[alias->slot];
1165         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1166         p->npages = alias->memory_size >> PAGE_SHIFT;
1167         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1168
1169         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1170                 if (kvm->aliases[n - 1].npages)
1171                         break;
1172         kvm->naliases = n;
1173
1174         kvm_mmu_zap_all(kvm);
1175
1176         mutex_unlock(&kvm->lock);
1177
1178         return 0;
1179
1180 out:
1181         return r;
1182 }
1183
1184 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1185 {
1186         int r;
1187
1188         r = 0;
1189         switch (chip->chip_id) {
1190         case KVM_IRQCHIP_PIC_MASTER:
1191                 memcpy(&chip->chip.pic,
1192                         &pic_irqchip(kvm)->pics[0],
1193                         sizeof(struct kvm_pic_state));
1194                 break;
1195         case KVM_IRQCHIP_PIC_SLAVE:
1196                 memcpy(&chip->chip.pic,
1197                         &pic_irqchip(kvm)->pics[1],
1198                         sizeof(struct kvm_pic_state));
1199                 break;
1200         case KVM_IRQCHIP_IOAPIC:
1201                 memcpy(&chip->chip.ioapic,
1202                         ioapic_irqchip(kvm),
1203                         sizeof(struct kvm_ioapic_state));
1204                 break;
1205         default:
1206                 r = -EINVAL;
1207                 break;
1208         }
1209         return r;
1210 }
1211
1212 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1213 {
1214         int r;
1215
1216         r = 0;
1217         switch (chip->chip_id) {
1218         case KVM_IRQCHIP_PIC_MASTER:
1219                 memcpy(&pic_irqchip(kvm)->pics[0],
1220                         &chip->chip.pic,
1221                         sizeof(struct kvm_pic_state));
1222                 break;
1223         case KVM_IRQCHIP_PIC_SLAVE:
1224                 memcpy(&pic_irqchip(kvm)->pics[1],
1225                         &chip->chip.pic,
1226                         sizeof(struct kvm_pic_state));
1227                 break;
1228         case KVM_IRQCHIP_IOAPIC:
1229                 memcpy(ioapic_irqchip(kvm),
1230                         &chip->chip.ioapic,
1231                         sizeof(struct kvm_ioapic_state));
1232                 break;
1233         default:
1234                 r = -EINVAL;
1235                 break;
1236         }
1237         kvm_pic_update_irq(pic_irqchip(kvm));
1238         return r;
1239 }
1240
1241 /*
1242  * Get (and clear) the dirty memory log for a memory slot.
1243  */
1244 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1245                                       struct kvm_dirty_log *log)
1246 {
1247         int r;
1248         int n;
1249         struct kvm_memory_slot *memslot;
1250         int is_dirty = 0;
1251
1252         mutex_lock(&kvm->lock);
1253
1254         r = kvm_get_dirty_log(kvm, log, &is_dirty);
1255         if (r)
1256                 goto out;
1257
1258         /* If nothing is dirty, don't bother messing with page tables. */
1259         if (is_dirty) {
1260                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1261                 kvm_flush_remote_tlbs(kvm);
1262                 memslot = &kvm->memslots[log->slot];
1263                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1264                 memset(memslot->dirty_bitmap, 0, n);
1265         }
1266         r = 0;
1267 out:
1268         mutex_unlock(&kvm->lock);
1269         return r;
1270 }
1271
1272 long kvm_arch_vm_ioctl(struct file *filp,
1273                        unsigned int ioctl, unsigned long arg)
1274 {
1275         struct kvm *kvm = filp->private_data;
1276         void __user *argp = (void __user *)arg;
1277         int r = -EINVAL;
1278
1279         switch (ioctl) {
1280         case KVM_SET_TSS_ADDR:
1281                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1282                 if (r < 0)
1283                         goto out;
1284                 break;
1285         case KVM_SET_MEMORY_REGION: {
1286                 struct kvm_memory_region kvm_mem;
1287                 struct kvm_userspace_memory_region kvm_userspace_mem;
1288
1289                 r = -EFAULT;
1290                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1291                         goto out;
1292                 kvm_userspace_mem.slot = kvm_mem.slot;
1293                 kvm_userspace_mem.flags = kvm_mem.flags;
1294                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1295                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1296                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1297                 if (r)
1298                         goto out;
1299                 break;
1300         }
1301         case KVM_SET_NR_MMU_PAGES:
1302                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1303                 if (r)
1304                         goto out;
1305                 break;
1306         case KVM_GET_NR_MMU_PAGES:
1307                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1308                 break;
1309         case KVM_SET_MEMORY_ALIAS: {
1310                 struct kvm_memory_alias alias;
1311
1312                 r = -EFAULT;
1313                 if (copy_from_user(&alias, argp, sizeof alias))
1314                         goto out;
1315                 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
1316                 if (r)
1317                         goto out;
1318                 break;
1319         }
1320         case KVM_CREATE_IRQCHIP:
1321                 r = -ENOMEM;
1322                 kvm->vpic = kvm_create_pic(kvm);
1323                 if (kvm->vpic) {
1324                         r = kvm_ioapic_init(kvm);
1325                         if (r) {
1326                                 kfree(kvm->vpic);
1327                                 kvm->vpic = NULL;
1328                                 goto out;
1329                         }
1330                 } else
1331                         goto out;
1332                 break;
1333         case KVM_IRQ_LINE: {
1334                 struct kvm_irq_level irq_event;
1335
1336                 r = -EFAULT;
1337                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1338                         goto out;
1339                 if (irqchip_in_kernel(kvm)) {
1340                         mutex_lock(&kvm->lock);
1341                         if (irq_event.irq < 16)
1342                                 kvm_pic_set_irq(pic_irqchip(kvm),
1343                                         irq_event.irq,
1344                                         irq_event.level);
1345                         kvm_ioapic_set_irq(kvm->vioapic,
1346                                         irq_event.irq,
1347                                         irq_event.level);
1348                         mutex_unlock(&kvm->lock);
1349                         r = 0;
1350                 }
1351                 break;
1352         }
1353         case KVM_GET_IRQCHIP: {
1354                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1355                 struct kvm_irqchip chip;
1356
1357                 r = -EFAULT;
1358                 if (copy_from_user(&chip, argp, sizeof chip))
1359                         goto out;
1360                 r = -ENXIO;
1361                 if (!irqchip_in_kernel(kvm))
1362                         goto out;
1363                 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1364                 if (r)
1365                         goto out;
1366                 r = -EFAULT;
1367                 if (copy_to_user(argp, &chip, sizeof chip))
1368                         goto out;
1369                 r = 0;
1370                 break;
1371         }
1372         case KVM_SET_IRQCHIP: {
1373                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1374                 struct kvm_irqchip chip;
1375
1376                 r = -EFAULT;
1377                 if (copy_from_user(&chip, argp, sizeof chip))
1378                         goto out;
1379                 r = -ENXIO;
1380                 if (!irqchip_in_kernel(kvm))
1381                         goto out;
1382                 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1383                 if (r)
1384                         goto out;
1385                 r = 0;
1386                 break;
1387         }
1388         case KVM_GET_SUPPORTED_CPUID: {
1389                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1390                 struct kvm_cpuid2 cpuid;
1391
1392                 r = -EFAULT;
1393                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1394                         goto out;
1395                 r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
1396                         cpuid_arg->entries);
1397                 if (r)
1398                         goto out;
1399
1400                 r = -EFAULT;
1401                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1402                         goto out;
1403                 r = 0;
1404                 break;
1405         }
1406         default:
1407                 ;
1408         }
1409 out:
1410         return r;
1411 }
1412
1413 static void kvm_init_msr_list(void)
1414 {
1415         u32 dummy[2];
1416         unsigned i, j;
1417
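             /* probe each msr on the host and compact msrs_to_save down to those that can actually be read */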
1418         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
1419                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
1420                         continue;
1421                 if (j < i)
1422                         msrs_to_save[j] = msrs_to_save[i];
1423                 j++;
1424         }
1425         num_msrs_to_save = j;
1426 }
1427
1428 /*
1429  * Only the apic needs an MMIO device hook, so shortcut now.
1430  */
1431 static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1432                                                 gpa_t addr)
1433 {
1434         struct kvm_io_device *dev;
1435
1436         if (vcpu->apic) {
1437                 dev = &vcpu->apic->dev;
1438                 if (dev->in_range(dev, addr))
1439                         return dev;
1440         }
1441         return NULL;
1442 }
1443
1444
1445 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1446                                                 gpa_t addr)
1447 {
1448         struct kvm_io_device *dev;
1449
1450         dev = vcpu_find_pervcpu_dev(vcpu, addr);
1451         if (dev == NULL)
1452                 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1453         return dev;
1454 }
1455
1456 int emulator_read_std(unsigned long addr,
1457                              void *val,
1458                              unsigned int bytes,
1459                              struct kvm_vcpu *vcpu)
1460 {
1461         void *data = val;
1462
1463         while (bytes) {
1464                 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1465                 unsigned offset = addr & (PAGE_SIZE-1);
1466                 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1467                 int ret;
1468
1469                 if (gpa == UNMAPPED_GVA)
1470                         return X86EMUL_PROPAGATE_FAULT;
1471                 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
1472                 if (ret < 0)
1473                         return X86EMUL_UNHANDLEABLE;
1474
1475                 bytes -= tocopy;
1476                 data += tocopy;
1477                 addr += tocopy;
1478         }
1479
1480         return X86EMUL_CONTINUE;
1481 }
1482 EXPORT_SYMBOL_GPL(emulator_read_std);
1483
1484 static int emulator_read_emulated(unsigned long addr,
1485                                   void *val,
1486                                   unsigned int bytes,
1487                                   struct kvm_vcpu *vcpu)
1488 {
1489         struct kvm_io_device *mmio_dev;
1490         gpa_t                 gpa;
1491
1492         if (vcpu->mmio_read_completed) {
1493                 memcpy(val, vcpu->mmio_data, bytes);
1494                 vcpu->mmio_read_completed = 0;
1495                 return X86EMUL_CONTINUE;
1496         }
1497
1498         gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1499
1500         /* For APIC access vmexit */
1501         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1502                 goto mmio;
1503
1504         if (emulator_read_std(addr, val, bytes, vcpu)
1505                         == X86EMUL_CONTINUE)
1506                 return X86EMUL_CONTINUE;
1507         if (gpa == UNMAPPED_GVA)
1508                 return X86EMUL_PROPAGATE_FAULT;
1509
1510 mmio:
1511         /*
1512          * Is this MMIO handled locally?
1513          */
1514         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1515         if (mmio_dev) {
1516                 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1517                 return X86EMUL_CONTINUE;
1518         }
1519
1520         vcpu->mmio_needed = 1;
1521         vcpu->mmio_phys_addr = gpa;
1522         vcpu->mmio_size = bytes;
1523         vcpu->mmio_is_write = 0;
1524
1525         return X86EMUL_UNHANDLEABLE;
1526 }
1527
1528 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1529                                const void *val, int bytes)
1530 {
1531         int ret;
1532
1533         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1534         if (ret < 0)
1535                 return 0;
1536         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1537         return 1;
1538 }
1539
1540 static int emulator_write_emulated_onepage(unsigned long addr,
1541                                            const void *val,
1542                                            unsigned int bytes,
1543                                            struct kvm_vcpu *vcpu)
1544 {
1545         struct kvm_io_device *mmio_dev;
1546         gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1547
1548         if (gpa == UNMAPPED_GVA) {
1549                 kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
1550                 return X86EMUL_PROPAGATE_FAULT;
1551         }
1552
1553         /* For APIC access vmexit */
1554         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1555                 goto mmio;
1556
1557         if (emulator_write_phys(vcpu, gpa, val, bytes))
1558                 return X86EMUL_CONTINUE;
1559
1560 mmio:
1561         /*
1562          * Is this MMIO handled locally?
1563          */
1564         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1565         if (mmio_dev) {
1566                 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1567                 return X86EMUL_CONTINUE;
1568         }
1569
1570         vcpu->mmio_needed = 1;
1571         vcpu->mmio_phys_addr = gpa;
1572         vcpu->mmio_size = bytes;
1573         vcpu->mmio_is_write = 1;
1574         memcpy(vcpu->mmio_data, val, bytes);
1575
1576         return X86EMUL_CONTINUE;
1577 }
1578
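/*
 * Split emulated writes that cross a page boundary into two single-page
 * writes, since the two halves may map to unrelated physical pages.
 */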
1579 int emulator_write_emulated(unsigned long addr,
1580                                    const void *val,
1581                                    unsigned int bytes,
1582                                    struct kvm_vcpu *vcpu)
1583 {
1584         /* Crossing a page boundary? */
1585         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1586                 int rc, now;
1587
1588                 now = -addr & ~PAGE_MASK;
1589                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1590                 if (rc != X86EMUL_CONTINUE)
1591                         return rc;
1592                 addr += now;
1593                 val += now;
1594                 bytes -= now;
1595         }
1596         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1597 }
1598 EXPORT_SYMBOL_GPL(emulator_write_emulated);
1599
1600 static int emulator_cmpxchg_emulated(unsigned long addr,
1601                                      const void *old,
1602                                      const void *new,
1603                                      unsigned int bytes,
1604                                      struct kvm_vcpu *vcpu)
1605 {
1606         static int reported;
1607
1608         if (!reported) {
1609                 reported = 1;
1610                 printk(KERN_WARNING "kvm: emulating exchange as write\n");
1611         }
1612         return emulator_write_emulated(addr, new, bytes, vcpu);
1613 }
1614
1615 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1616 {
1617         return kvm_x86_ops->get_segment_base(vcpu, seg);
1618 }
1619
1620 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1621 {
1622         return X86EMUL_CONTINUE;
1623 }
1624
1625 int emulate_clts(struct kvm_vcpu *vcpu)
1626 {
1627         kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
1628         return X86EMUL_CONTINUE;
1629 }
1630
1631 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
1632 {
1633         struct kvm_vcpu *vcpu = ctxt->vcpu;
1634
1635         switch (dr) {
1636         case 0 ... 3:
1637                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
1638                 return X86EMUL_CONTINUE;
1639         default:
1640                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
1641                 return X86EMUL_UNHANDLEABLE;
1642         }
1643 }
1644
1645 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1646 {
1647         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
1648         int exception;
1649
1650         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
1651         if (exception) {
1652                 /* FIXME: better handling */
1653                 return X86EMUL_UNHANDLEABLE;
1654         }
1655         return X86EMUL_CONTINUE;
1656 }
1657
1658 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
1659 {
1660         static int reported;
1661         u8 opcodes[4];
1662         unsigned long rip = vcpu->rip;
1663         unsigned long rip_linear;
1664
1665         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
1666
1667         if (reported)
1668                 return;
1669
1670         emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
1671
1672         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
1673                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1674         reported = 1;
1675 }
1676 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
1677
1678 struct x86_emulate_ops emulate_ops = {
1679         .read_std            = emulator_read_std,
1680         .read_emulated       = emulator_read_emulated,
1681         .write_emulated      = emulator_write_emulated,
1682         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
1683 };
1684
1685 int emulate_instruction(struct kvm_vcpu *vcpu,
1686                         struct kvm_run *run,
1687                         unsigned long cr2,
1688                         u16 error_code,
1689                         int no_decode)
1690 {
1691         int r;
1692
1693         vcpu->mmio_fault_cr2 = cr2;
1694         kvm_x86_ops->cache_regs(vcpu);
1695
1696         vcpu->mmio_is_write = 0;
1697         vcpu->pio.string = 0;
1698
1699         if (!no_decode) {
1700                 int cs_db, cs_l;
1701                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1702
1703                 vcpu->emulate_ctxt.vcpu = vcpu;
1704                 vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
1705                 vcpu->emulate_ctxt.mode =
1706                         (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
1707                         ? X86EMUL_MODE_REAL : cs_l
1708                         ? X86EMUL_MODE_PROT64 : cs_db
1709                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1710
1711                 if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1712                         vcpu->emulate_ctxt.cs_base = 0;
1713                         vcpu->emulate_ctxt.ds_base = 0;
1714                         vcpu->emulate_ctxt.es_base = 0;
1715                         vcpu->emulate_ctxt.ss_base = 0;
1716                 } else {
1717                         vcpu->emulate_ctxt.cs_base =
1718                                         get_segment_base(vcpu, VCPU_SREG_CS);
1719                         vcpu->emulate_ctxt.ds_base =
1720                                         get_segment_base(vcpu, VCPU_SREG_DS);
1721                         vcpu->emulate_ctxt.es_base =
1722                                         get_segment_base(vcpu, VCPU_SREG_ES);
1723                         vcpu->emulate_ctxt.ss_base =
1724                                         get_segment_base(vcpu, VCPU_SREG_SS);
1725                 }
1726
1727                 vcpu->emulate_ctxt.gs_base =
1728                                         get_segment_base(vcpu, VCPU_SREG_GS);
1729                 vcpu->emulate_ctxt.fs_base =
1730                                         get_segment_base(vcpu, VCPU_SREG_FS);
1731
1732                 r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
1733                 ++vcpu->stat.insn_emulation;
1734                 if (r)  {
1735                         ++vcpu->stat.insn_emulation_fail;
1736                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1737                                 return EMULATE_DONE;
1738                         return EMULATE_FAIL;
1739                 }
1740         }
1741
1742         r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
1743
1744         if (vcpu->pio.string)
1745                 return EMULATE_DO_MMIO;
1746
1747         if ((r || vcpu->mmio_is_write) && run) {
1748                 run->exit_reason = KVM_EXIT_MMIO;
1749                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
1750                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
1751                 run->mmio.len = vcpu->mmio_size;
1752                 run->mmio.is_write = vcpu->mmio_is_write;
1753         }
1754
1755         if (r) {
1756                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1757                         return EMULATE_DONE;
1758                 if (!vcpu->mmio_needed) {
1759                         kvm_report_emulation_failure(vcpu, "mmio");
1760                         return EMULATE_FAIL;
1761                 }
1762                 return EMULATE_DO_MMIO;
1763         }
1764
1765         kvm_x86_ops->decache_regs(vcpu);
1766         kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
1767
1768         if (vcpu->mmio_is_write) {
1769                 vcpu->mmio_needed = 0;
1770                 return EMULATE_DO_MMIO;
1771         }
1772
1773         return EMULATE_DONE;
1774 }
1775 EXPORT_SYMBOL_GPL(emulate_instruction);
1776
1777 static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
1778 {
1779         int i;
1780
1781         for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
1782                 if (vcpu->pio.guest_pages[i]) {
1783                         kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
1784                         vcpu->pio.guest_pages[i] = NULL;
1785                 }
1786 }
1787
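/*
 * Copy string I/O data between the vcpu's pio_data page and the pinned
 * guest pages: towards guest memory for IN, from guest memory for OUT.
 * The guest pages are vmap()ed so a buffer that straddles a page boundary
 * can be copied with a single memcpy().
 */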
1788 static int pio_copy_data(struct kvm_vcpu *vcpu)
1789 {
1790         void *p = vcpu->pio_data;
1791         void *q;
1792         unsigned bytes;
1793         int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1794
1795         q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1796                  PAGE_KERNEL);
1797         if (!q) {
1798                 free_pio_guest_pages(vcpu);
1799                 return -ENOMEM;
1800         }
1801         q += vcpu->pio.guest_page_offset;
1802         bytes = vcpu->pio.size * vcpu->pio.cur_count;
1803         if (vcpu->pio.in)
1804                 memcpy(q, p, bytes);
1805         else
1806                 memcpy(p, q, bytes);
1807         q -= vcpu->pio.guest_page_offset;
1808         vunmap(q);
1809         free_pio_guest_pages(vcpu);
1810         return 0;
1811 }
1812
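/*
 * Finish a port I/O operation once its data is available: latch IN data
 * into RAX for non-string I/O, copy string IN data back into guest memory,
 * and advance RCX/RSI/RDI according to the repeat count, direction flag
 * and operand size.
 */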
1813 int complete_pio(struct kvm_vcpu *vcpu)
1814 {
1815         struct kvm_pio_request *io = &vcpu->pio;
1816         long delta;
1817         int r;
1818
1819         kvm_x86_ops->cache_regs(vcpu);
1820
1821         if (!io->string) {
1822                 if (io->in)
1823                         memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
1824                                io->size);
1825         } else {
1826                 if (io->in) {
1827                         r = pio_copy_data(vcpu);
1828                         if (r) {
1829                                 kvm_x86_ops->cache_regs(vcpu);
1830                                 return r;
1831                         }
1832                 }
1833
1834                 delta = 1;
1835                 if (io->rep) {
1836                         delta *= io->cur_count;
1837                         /*
1838                          * The size of the register should really depend on
1839                          * current address size.
1840                          */
1841                         vcpu->regs[VCPU_REGS_RCX] -= delta;
1842                 }
1843                 if (io->down)
1844                         delta = -delta;
1845                 delta *= io->size;
1846                 if (io->in)
1847                         vcpu->regs[VCPU_REGS_RDI] += delta;
1848                 else
1849                         vcpu->regs[VCPU_REGS_RSI] += delta;
1850         }
1851
1852         kvm_x86_ops->decache_regs(vcpu);
1853
1854         io->count -= io->cur_count;
1855         io->cur_count = 0;
1856
1857         return 0;
1858 }
1859
1860 static void kernel_pio(struct kvm_io_device *pio_dev,
1861                        struct kvm_vcpu *vcpu,
1862                        void *pd)
1863 {
1864         /* TODO: String I/O for in-kernel device */
1865
1866         mutex_lock(&vcpu->kvm->lock);
1867         if (vcpu->pio.in)
1868                 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1869                                   vcpu->pio.size,
1870                                   pd);
1871         else
1872                 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1873                                    vcpu->pio.size,
1874                                    pd);
1875         mutex_unlock(&vcpu->kvm->lock);
1876 }
1877
1878 static void pio_string_write(struct kvm_io_device *pio_dev,
1879                              struct kvm_vcpu *vcpu)
1880 {
1881         struct kvm_pio_request *io = &vcpu->pio;
1882         void *pd = vcpu->pio_data;
1883         int i;
1884
1885         mutex_lock(&vcpu->kvm->lock);
1886         for (i = 0; i < io->cur_count; i++) {
1887                 kvm_iodevice_write(pio_dev, io->port,
1888                                    io->size,
1889                                    pd);
1890                 pd += io->size;
1891         }
1892         mutex_unlock(&vcpu->kvm->lock);
1893 }
1894
1895 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1896                                                gpa_t addr)
1897 {
1898         return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1899 }
1900
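/*
 * Emulate a single (non-string) IN/OUT instruction.  The kvm_run I/O exit
 * fields are always filled in; if an in-kernel device claims the port the
 * transfer completes immediately and 1 is returned, otherwise 0 is
 * returned and the exit is handed to userspace.
 */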
1901 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1902                   int size, unsigned port)
1903 {
1904         struct kvm_io_device *pio_dev;
1905
1906         vcpu->run->exit_reason = KVM_EXIT_IO;
1907         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1908         vcpu->run->io.size = vcpu->pio.size = size;
1909         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1910         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1911         vcpu->run->io.port = vcpu->pio.port = port;
1912         vcpu->pio.in = in;
1913         vcpu->pio.string = 0;
1914         vcpu->pio.down = 0;
1915         vcpu->pio.guest_page_offset = 0;
1916         vcpu->pio.rep = 0;
1917
1918         kvm_x86_ops->cache_regs(vcpu);
1919         memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1920         kvm_x86_ops->decache_regs(vcpu);
1921
1922         kvm_x86_ops->skip_emulated_instruction(vcpu);
1923
1924         pio_dev = vcpu_find_pio_dev(vcpu, port);
1925         if (pio_dev) {
1926                 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1927                 complete_pio(vcpu);
1928                 return 1;
1929         }
1930         return 0;
1931 }
1932 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
1933
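/*
 * Emulate string port I/O (INS/OUTS).  The guest buffer is pinned (two
 * pages when it straddles a page boundary), the transfer is clamped to
 * what fits up to the boundary, and the data is either handed to an
 * in-kernel device or left for userspace to complete.
 */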
1934 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1935                   int size, unsigned long count, int down,
1936                   gva_t address, int rep, unsigned port)
1937 {
1938         unsigned now, in_page;
1939         int i, ret = 0;
1940         int nr_pages = 1;
1941         struct page *page;
1942         struct kvm_io_device *pio_dev;
1943
1944         vcpu->run->exit_reason = KVM_EXIT_IO;
1945         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1946         vcpu->run->io.size = vcpu->pio.size = size;
1947         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1948         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
1949         vcpu->run->io.port = vcpu->pio.port = port;
1950         vcpu->pio.in = in;
1951         vcpu->pio.string = 1;
1952         vcpu->pio.down = down;
1953         vcpu->pio.guest_page_offset = offset_in_page(address);
1954         vcpu->pio.rep = rep;
1955
1956         if (!count) {
1957                 kvm_x86_ops->skip_emulated_instruction(vcpu);
1958                 return 1;
1959         }
1960
1961         if (!down)
1962                 in_page = PAGE_SIZE - offset_in_page(address);
1963         else
1964                 in_page = offset_in_page(address) + size;
1965         now = min(count, (unsigned long)in_page / size);
1966         if (!now) {
1967                 /*
1968                  * String I/O straddles page boundary.  Pin two guest pages
1969                  * so that we satisfy atomicity constraints.  Do just one
1970                  * transaction to avoid complexity.
1971                  */
1972                 nr_pages = 2;
1973                 now = 1;
1974         }
1975         if (down) {
1976                 /*
1977                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
1978                  */
1979                 pr_unimpl(vcpu, "guest string pio down\n");
1980                 inject_gp(vcpu);
1981                 return 1;
1982         }
1983         vcpu->run->io.count = now;
1984         vcpu->pio.cur_count = now;
1985
1986         if (vcpu->pio.cur_count == vcpu->pio.count)
1987                 kvm_x86_ops->skip_emulated_instruction(vcpu);
1988
1989         for (i = 0; i < nr_pages; ++i) {
1990                 mutex_lock(&vcpu->kvm->lock);
1991                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
1992                 vcpu->pio.guest_pages[i] = page;
1993                 mutex_unlock(&vcpu->kvm->lock);
1994                 if (!page) {
1995                         inject_gp(vcpu);
1996                         free_pio_guest_pages(vcpu);
1997                         return 1;
1998                 }
1999         }
2000
2001         pio_dev = vcpu_find_pio_dev(vcpu, port);
2002         if (!vcpu->pio.in) {
2003                 /* string PIO write */
2004                 ret = pio_copy_data(vcpu);
2005                 if (ret >= 0 && pio_dev) {
2006                         pio_string_write(pio_dev, vcpu);
2007                         complete_pio(vcpu);
2008                         if (vcpu->pio.count == 0)
2009                                 ret = 1;
2010                 }
2011         } else if (pio_dev)
2012                 pr_unimpl(vcpu, "no string pio read support yet, "
2013                        "port %x size %d count %ld\n",
2014                         port, size, count);
2015
2016         return ret;
2017 }
2018 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2019
2020 int kvm_arch_init(void *opaque)
2021 {
2022         int r;
2023         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2024
2025         r = kvm_mmu_module_init();
2026         if (r)
2027                 goto out_fail;
2028
2029         kvm_init_msr_list();
2030
2031         if (kvm_x86_ops) {
2032                 printk(KERN_ERR "kvm: already loaded the other module\n");
2033                 r = -EEXIST;
2034                 goto out;
2035         }
2036
2037         if (!ops->cpu_has_kvm_support()) {
2038                 printk(KERN_ERR "kvm: no hardware support\n");
2039                 r = -EOPNOTSUPP;
2040                 goto out;
2041         }
2042         if (ops->disabled_by_bios()) {
2043                 printk(KERN_ERR "kvm: disabled by bios\n");
2044                 r = -EOPNOTSUPP;
2045                 goto out;
2046         }
2047
2048         kvm_x86_ops = ops;
2049         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2050         return 0;
2051
2052 out:
2053         kvm_mmu_module_exit();
2054 out_fail:
2055         return r;
2056 }
2057
2058 void kvm_arch_exit(void)
2059 {
2060         kvm_x86_ops = NULL;
2061         kvm_mmu_module_exit();
2062 }
2063
2064 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2065 {
2066         ++vcpu->stat.halt_exits;
2067         if (irqchip_in_kernel(vcpu->kvm)) {
2068                 vcpu->mp_state = VCPU_MP_STATE_HALTED;
2069                 kvm_vcpu_block(vcpu);
2070                 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
2071                         return -EINTR;
2072                 return 1;
2073         } else {
2074                 vcpu->run->exit_reason = KVM_EXIT_HLT;
2075                 return 0;
2076         }
2077 }
2078 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2079
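/*
 * Dispatch a guest hypercall: the call number is taken from RAX and up to
 * four arguments from RBX/RCX/RDX/RSI, truncated to 32 bits outside long
 * mode.  No hypercalls are implemented yet, so every call returns
 * -KVM_ENOSYS in RAX.
 */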
2080 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2081 {
2082         unsigned long nr, a0, a1, a2, a3, ret;
2083
2084         kvm_x86_ops->cache_regs(vcpu);
2085
2086         nr = vcpu->regs[VCPU_REGS_RAX];
2087         a0 = vcpu->regs[VCPU_REGS_RBX];
2088         a1 = vcpu->regs[VCPU_REGS_RCX];
2089         a2 = vcpu->regs[VCPU_REGS_RDX];
2090         a3 = vcpu->regs[VCPU_REGS_RSI];
2091
2092         if (!is_long_mode(vcpu)) {
2093                 nr &= 0xFFFFFFFF;
2094                 a0 &= 0xFFFFFFFF;
2095                 a1 &= 0xFFFFFFFF;
2096                 a2 &= 0xFFFFFFFF;
2097                 a3 &= 0xFFFFFFFF;
2098         }
2099
2100         switch (nr) {
2101         default:
2102                 ret = -KVM_ENOSYS;
2103                 break;
2104         }
2105         vcpu->regs[VCPU_REGS_RAX] = ret;
2106         kvm_x86_ops->decache_regs(vcpu);
2107         return 0;
2108 }
2109 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2110
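/*
 * Rewrite the guest's hypercall instruction with the sequence native to
 * the host CPU (e.g. vmcall vs. vmmcall), zapping all shadow pages first
 * so the patched bytes become visible to every VCPU at once.
 */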
2111 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2112 {
2113         char instruction[3];
2114         int ret = 0;
2115
2116         mutex_lock(&vcpu->kvm->lock);
2117
2118         /*
2119          * Blow out the MMU so that no other VCPU has an active mapping;
2120          * this ensures the updated hypercall appears atomically across
2121          * all VCPUs.
2122          */
2123         kvm_mmu_zap_all(vcpu->kvm);
2124
2125         kvm_x86_ops->cache_regs(vcpu);
2126         kvm_x86_ops->patch_hypercall(vcpu, instruction);
2127         if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
2128             != X86EMUL_CONTINUE)
2129                 ret = -EFAULT;
2130
2131         mutex_unlock(&vcpu->kvm->lock);
2132
2133         return ret;
2134 }
2135
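/*
 * Merge a 32-bit value written by the guest into the low half of a control
 * register value, preserving the upper 32 bits.
 */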
2136 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2137 {
2138         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2139 }
2140
2141 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2142 {
2143         struct descriptor_table dt = { limit, base };
2144
2145         kvm_x86_ops->set_gdt(vcpu, &dt);
2146 }
2147
2148 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2149 {
2150         struct descriptor_table dt = { limit, base };
2151
2152         kvm_x86_ops->set_idt(vcpu, &dt);
2153 }
2154
2155 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2156                    unsigned long *rflags)
2157 {
2158         lmsw(vcpu, msw);
2159         *rflags = kvm_x86_ops->get_rflags(vcpu);
2160 }
2161
2162 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2163 {
2164         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2165         switch (cr) {
2166         case 0:
2167                 return vcpu->cr0;
2168         case 2:
2169                 return vcpu->cr2;
2170         case 3:
2171                 return vcpu->cr3;
2172         case 4:
2173                 return vcpu->cr4;
2174         default:
2175                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2176                 return 0;
2177         }
2178 }
2179
2180 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2181                      unsigned long *rflags)
2182 {
2183         switch (cr) {
2184         case 0:
2185                 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
2186                 *rflags = kvm_x86_ops->get_rflags(vcpu);
2187                 break;
2188         case 2:
2189                 vcpu->cr2 = val;
2190                 break;
2191         case 3:
2192                 set_cr3(vcpu, val);
2193                 break;
2194         case 4:
2195                 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
2196                 break;
2197         default:
2198                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2199         }
2200 }
2201
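/*
 * Some CPUID leaves (e.g. leaf 2) are "stateful": they return different
 * data on successive invocations.  Move the STATE_READ_NEXT marker from
 * entry i to the next entry with the same function number, wrapping back
 * to entry i itself when it is the only such entry.
 */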
2202 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2203 {
2204         struct kvm_cpuid_entry2 *e = &vcpu->cpuid_entries[i];
2205         int j, nent = vcpu->cpuid_nent;
2206
2207         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2208         /* when no next entry is found, the current entry[i] is reselected */
2209         for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
2210                 struct kvm_cpuid_entry2 *ej = &vcpu->cpuid_entries[j];
2211                 if (ej->function == e->function) {
2212                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2213                         return j;
2214                 }
2215         }
2216         return 0; /* silence gcc, even though control never reaches here */
2217 }
2218
2219 /* find an entry with matching function, matching index (if needed), and that
2220  * should be read next (if it's stateful) */
2221 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2222         u32 function, u32 index)
2223 {
2224         if (e->function != function)
2225                 return 0;
2226         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2227                 return 0;
2228         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2229                 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2230                 return 0;
2231         return 1;
2232 }
2233
2234 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2235 {
2236         int i;
2237         u32 function, index;
2238         struct kvm_cpuid_entry2 *e, *best;
2239
2240         kvm_x86_ops->cache_regs(vcpu);
2241         function = vcpu->regs[VCPU_REGS_RAX];
2242         index = vcpu->regs[VCPU_REGS_RCX];
2243         vcpu->regs[VCPU_REGS_RAX] = 0;
2244         vcpu->regs[VCPU_REGS_RBX] = 0;
2245         vcpu->regs[VCPU_REGS_RCX] = 0;
2246         vcpu->regs[VCPU_REGS_RDX] = 0;
2247         best = NULL;
2248         for (i = 0; i < vcpu->cpuid_nent; ++i) {
2249                 e = &vcpu->cpuid_entries[i];
2250                 if (is_matching_cpuid_entry(e, function, index)) {
2251                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2252                                 move_to_next_stateful_cpuid_entry(vcpu, i);
2253                         best = e;
2254                         break;
2255                 }
2256                 /*
2257                  * Both basic or both extended?
2258                  */
2259                 if (((e->function ^ function) & 0x80000000) == 0)
2260                         if (!best || e->function > best->function)
2261                                 best = e;
2262         }
2263         if (best) {
2264                 vcpu->regs[VCPU_REGS_RAX] = best->eax;
2265                 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
2266                 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
2267                 vcpu->regs[VCPU_REGS_RDX] = best->edx;
2268         }
2269         kvm_x86_ops->decache_regs(vcpu);
2270         kvm_x86_ops->skip_emulated_instruction(vcpu);
2271 }
2272 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2273
2274 /*
2275  * Check if userspace requested an interrupt window, and that the
2276  * interrupt window is open.
2277  *
2278  * No need to exit to userspace if we already have an interrupt queued.
2279  */
2280 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2281                                           struct kvm_run *kvm_run)
2282 {
2283         return (!vcpu->irq_summary &&
2284                 kvm_run->request_interrupt_window &&
2285                 vcpu->interrupt_window_open &&
2286                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2287 }
2288
2289 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2290                               struct kvm_run *kvm_run)
2291 {
2292         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2293         kvm_run->cr8 = get_cr8(vcpu);
2294         kvm_run->apic_base = kvm_get_apic_base(vcpu);
2295         if (irqchip_in_kernel(vcpu->kvm))
2296                 kvm_run->ready_for_interrupt_injection = 1;
2297         else
2298                 kvm_run->ready_for_interrupt_injection =
2299                                         (vcpu->interrupt_window_open &&
2300                                          vcpu->irq_summary == 0);
2301 }
2302
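/*
 * Inner vcpu run loop: reload the MMU, inject pending timer and external
 * interrupts, enter the guest with interrupts disabled, and keep looping
 * on the exit handler until it requests a return to userspace (signal,
 * I/O, interrupt-window request) or the vcpu needs to be rescheduled.
 */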
2303 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2304 {
2305         int r;
2306
2307         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
2308                 pr_debug("vcpu %d received sipi with vector # %x\n",
2309                        vcpu->vcpu_id, vcpu->sipi_vector);
2310                 kvm_lapic_reset(vcpu);
2311                 r = kvm_x86_ops->vcpu_reset(vcpu);
2312                 if (r)
2313                         return r;
2314                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2315         }
2316
2317 preempted:
2318         if (vcpu->guest_debug.enabled)
2319                 kvm_x86_ops->guest_debug_pre(vcpu);
2320
2321 again:
2322         r = kvm_mmu_reload(vcpu);
2323         if (unlikely(r))
2324                 goto out;
2325
2326         kvm_inject_pending_timer_irqs(vcpu);
2327
2328         preempt_disable();
2329
2330         kvm_x86_ops->prepare_guest_switch(vcpu);
2331         kvm_load_guest_fpu(vcpu);
2332
2333         local_irq_disable();
2334
2335         if (signal_pending(current)) {
2336                 local_irq_enable();
2337                 preempt_enable();
2338                 r = -EINTR;
2339                 kvm_run->exit_reason = KVM_EXIT_INTR;
2340                 ++vcpu->stat.signal_exits;
2341                 goto out;
2342         }
2343
2344         if (irqchip_in_kernel(vcpu->kvm))
2345                 kvm_x86_ops->inject_pending_irq(vcpu);
2346         else
2347                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2348
2349         vcpu->guest_mode = 1;
2350         kvm_guest_enter();
2351
2352         if (vcpu->requests)
2353                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2354                         kvm_x86_ops->tlb_flush(vcpu);
2355
2356         kvm_x86_ops->run(vcpu, kvm_run);
2357
2358         vcpu->guest_mode = 0;
2359         local_irq_enable();
2360
2361         ++vcpu->stat.exits;
2362
2363         /*
2364          * We must have an instruction between local_irq_enable() and
2365          * kvm_guest_exit(), so the timer interrupt isn't delayed by
2366          * the interrupt shadow.  The stat.exits increment will do nicely.
2367          * But we need to prevent reordering, hence this barrier():
2368          */
2369         barrier();
2370
2371         kvm_guest_exit();
2372
2373         preempt_enable();
2374
2375         /*
2376          * Profile KVM exit RIPs:
2377          */
2378         if (unlikely(prof_on == KVM_PROFILING)) {
2379                 kvm_x86_ops->cache_regs(vcpu);
2380                 profile_hit(KVM_PROFILING, (void *)vcpu->rip);
2381         }
2382
2383         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2384
2385         if (r > 0) {
2386                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2387                         r = -EINTR;
2388                         kvm_run->exit_reason = KVM_EXIT_INTR;
2389                         ++vcpu->stat.request_irq_exits;
2390                         goto out;
2391                 }
2392                 if (!need_resched())
2393                         goto again;
2394         }
2395
2396 out:
2397         if (r > 0) {
2398                 kvm_resched(vcpu);
2399                 goto preempted;
2400         }
2401
2402         post_kvm_run_save(vcpu, kvm_run);
2403
2404         return r;
2405 }
2406
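/*
 * Top-level KVM_RUN handler: complete any PIO or MMIO operation left over
 * from the previous exit, re-sync the TPR for userspace irqchips, then
 * enter __vcpu_run() with the caller's signal mask installed.
 */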
2407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2408 {
2409         int r;
2410         sigset_t sigsaved;
2411
2412         vcpu_load(vcpu);
2413
2414         if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2415                 kvm_vcpu_block(vcpu);
2416                 vcpu_put(vcpu);
2417                 return -EAGAIN;
2418         }
2419
2420         if (vcpu->sigset_active)
2421                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2422
2423         /* re-sync apic's tpr */
2424         if (!irqchip_in_kernel(vcpu->kvm))
2425                 set_cr8(vcpu, kvm_run->cr8);
2426
2427         if (vcpu->pio.cur_count) {
2428                 r = complete_pio(vcpu);
2429                 if (r)
2430                         goto out;
2431         }
2432 #ifdef CONFIG_HAS_IOMEM
2433         if (vcpu->mmio_needed) {
2434                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2435                 vcpu->mmio_read_completed = 1;
2436                 vcpu->mmio_needed = 0;
2437                 r = emulate_instruction(vcpu, kvm_run,
2438                                         vcpu->mmio_fault_cr2, 0, 1);
2439                 if (r == EMULATE_DO_MMIO) {
2440                         /*
2441                          * Read-modify-write.  Back to userspace.
2442                          */
2443                         r = 0;
2444                         goto out;
2445                 }
2446         }
2447 #endif
2448         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2449                 kvm_x86_ops->cache_regs(vcpu);
2450                 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2451                 kvm_x86_ops->decache_regs(vcpu);
2452         }
2453
2454         r = __vcpu_run(vcpu, kvm_run);
2455
2456 out:
2457         if (vcpu->sigset_active)
2458                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2459
2460         vcpu_put(vcpu);
2461         return r;
2462 }
2463
2464 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2465 {
2466         vcpu_load(vcpu);
2467
2468         kvm_x86_ops->cache_regs(vcpu);
2469
2470         regs->rax = vcpu->regs[VCPU_REGS_RAX];
2471         regs->rbx = vcpu->regs[VCPU_REGS_RBX];
2472         regs->rcx = vcpu->regs[VCPU_REGS_RCX];
2473         regs->rdx = vcpu->regs[VCPU_REGS_RDX];
2474         regs->rsi = vcpu->regs[VCPU_REGS_RSI];
2475         regs->rdi = vcpu->regs[VCPU_REGS_RDI];
2476         regs->rsp = vcpu->regs[VCPU_REGS_RSP];
2477         regs->rbp = vcpu->regs[VCPU_REGS_RBP];
2478 #ifdef CONFIG_X86_64
2479         regs->r8 = vcpu->regs[VCPU_REGS_R8];
2480         regs->r9 = vcpu->regs[VCPU_REGS_R9];
2481         regs->r10 = vcpu->regs[VCPU_REGS_R10];
2482         regs->r11 = vcpu->regs[VCPU_REGS_R11];
2483         regs->r12 = vcpu->regs[VCPU_REGS_R12];
2484         regs->r13 = vcpu->regs[VCPU_REGS_R13];
2485         regs->r14 = vcpu->regs[VCPU_REGS_R14];
2486         regs->r15 = vcpu->regs[VCPU_REGS_R15];
2487 #endif
2488
2489         regs->rip = vcpu->rip;
2490         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2491
2492         /*
2493          * Don't leak debug flags in case they were set for guest debugging
2494          */
2495         if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2496                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2497
2498         vcpu_put(vcpu);
2499
2500         return 0;
2501 }
2502
2503 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2504 {
2505         vcpu_load(vcpu);
2506
2507         vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2508         vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2509         vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2510         vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2511         vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2512         vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2513         vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2514         vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
2515 #ifdef CONFIG_X86_64
2516         vcpu->regs[VCPU_REGS_R8] = regs->r8;
2517         vcpu->regs[VCPU_REGS_R9] = regs->r9;
2518         vcpu->regs[VCPU_REGS_R10] = regs->r10;
2519         vcpu->regs[VCPU_REGS_R11] = regs->r11;
2520         vcpu->regs[VCPU_REGS_R12] = regs->r12;
2521         vcpu->regs[VCPU_REGS_R13] = regs->r13;
2522         vcpu->regs[VCPU_REGS_R14] = regs->r14;
2523         vcpu->regs[VCPU_REGS_R15] = regs->r15;
2524 #endif
2525
2526         vcpu->rip = regs->rip;
2527         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2528
2529         kvm_x86_ops->decache_regs(vcpu);
2530
2531         vcpu_put(vcpu);
2532
2533         return 0;
2534 }
2535
2536 static void get_segment(struct kvm_vcpu *vcpu,
2537                         struct kvm_segment *var, int seg)
2538 {
2539         return kvm_x86_ops->get_segment(vcpu, var, seg);
2540 }
2541
2542 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2543 {
2544         struct kvm_segment cs;
2545
2546         get_segment(vcpu, &cs, VCPU_SREG_CS);
2547         *db = cs.db;
2548         *l = cs.l;
2549 }
2550 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2551
2552 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2553                                   struct kvm_sregs *sregs)
2554 {
2555         struct descriptor_table dt;
2556         int pending_vec;
2557
2558         vcpu_load(vcpu);
2559
2560         get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2561         get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2562         get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2563         get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2564         get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2565         get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2566
2567         get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2568         get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2569
2570         kvm_x86_ops->get_idt(vcpu, &dt);
2571         sregs->idt.limit = dt.limit;
2572         sregs->idt.base = dt.base;
2573         kvm_x86_ops->get_gdt(vcpu, &dt);
2574         sregs->gdt.limit = dt.limit;
2575         sregs->gdt.base = dt.base;
2576
2577         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2578         sregs->cr0 = vcpu->cr0;
2579         sregs->cr2 = vcpu->cr2;
2580         sregs->cr3 = vcpu->cr3;
2581         sregs->cr4 = vcpu->cr4;
2582         sregs->cr8 = get_cr8(vcpu);
2583         sregs->efer = vcpu->shadow_efer;
2584         sregs->apic_base = kvm_get_apic_base(vcpu);
2585
2586         if (irqchip_in_kernel(vcpu->kvm)) {
2587                 memset(sregs->interrupt_bitmap, 0,
2588                        sizeof sregs->interrupt_bitmap);
2589                 pending_vec = kvm_x86_ops->get_irq(vcpu);
2590                 if (pending_vec >= 0)
2591                         set_bit(pending_vec,
2592                                 (unsigned long *)sregs->interrupt_bitmap);
2593         } else
2594                 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2595                        sizeof sregs->interrupt_bitmap);
2596
2597         vcpu_put(vcpu);
2598
2599         return 0;
2600 }
2601
2602 static void set_segment(struct kvm_vcpu *vcpu,
2603                         struct kvm_segment *var, int seg)
2604 {
2605         return kvm_x86_ops->set_segment(vcpu, var, seg);
2606 }
2607
2608 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2609                                   struct kvm_sregs *sregs)
2610 {
2611         int mmu_reset_needed = 0;
2612         int i, pending_vec, max_bits;
2613         struct descriptor_table dt;
2614
2615         vcpu_load(vcpu);
2616
2617         dt.limit = sregs->idt.limit;
2618         dt.base = sregs->idt.base;
2619         kvm_x86_ops->set_idt(vcpu, &dt);
2620         dt.limit = sregs->gdt.limit;
2621         dt.base = sregs->gdt.base;
2622         kvm_x86_ops->set_gdt(vcpu, &dt);
2623
2624         vcpu->cr2 = sregs->cr2;
2625         mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2626         vcpu->cr3 = sregs->cr3;
2627
2628         set_cr8(vcpu, sregs->cr8);
2629
2630         mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2631 #ifdef CONFIG_X86_64
2632         kvm_x86_ops->set_efer(vcpu, sregs->efer);
2633 #endif
2634         kvm_set_apic_base(vcpu, sregs->apic_base);
2635
2636         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2637
2638         mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2639         vcpu->cr0 = sregs->cr0;
2640         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
2641
2642         mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2643         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2644         if (!is_long_mode(vcpu) && is_pae(vcpu))
2645                 load_pdptrs(vcpu, vcpu->cr3);
2646
2647         if (mmu_reset_needed)
2648                 kvm_mmu_reset_context(vcpu);
2649
2650         if (!irqchip_in_kernel(vcpu->kvm)) {
2651                 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2652                        sizeof vcpu->irq_pending);
2653                 vcpu->irq_summary = 0;
2654                 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2655                         if (vcpu->irq_pending[i])
2656                                 __set_bit(i, &vcpu->irq_summary);
2657         } else {
2658                 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2659                 pending_vec = find_first_bit(
2660                         (const unsigned long *)sregs->interrupt_bitmap,
2661                         max_bits);
2662                 /* Only pending external irq is handled here */
2663                 if (pending_vec < max_bits) {
2664                         kvm_x86_ops->set_irq(vcpu, pending_vec);
2665                         pr_debug("Set back pending irq %d\n",
2666                                  pending_vec);
2667                 }
2668         }
2669
2670         set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2671         set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2672         set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2673         set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2674         set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2675         set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2676
2677         set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2678         set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2679
2680         vcpu_put(vcpu);
2681
2682         return 0;
2683 }
2684
2685 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2686                                     struct kvm_debug_guest *dbg)
2687 {
2688         int r;
2689
2690         vcpu_load(vcpu);
2691
2692         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
2693
2694         vcpu_put(vcpu);
2695
2696         return r;
2697 }
2698
2699 /*
2700  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
2701  * we have asm/x86/processor.h
2702  */
2703 struct fxsave {
2704         u16     cwd;
2705         u16     swd;
2706         u16     twd;
2707         u16     fop;
2708         u64     rip;
2709         u64     rdp;
2710         u32     mxcsr;
2711         u32     mxcsr_mask;
2712         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
2713 #ifdef CONFIG_X86_64
2714         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
2715 #else
2716         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
2717 #endif
2718 };
2719
2720 /*
2721  * Translate a guest virtual address to a guest physical address.
2722  */
2723 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2724                                     struct kvm_translation *tr)
2725 {
2726         unsigned long vaddr = tr->linear_address;
2727         gpa_t gpa;
2728
2729         vcpu_load(vcpu);
2730         mutex_lock(&vcpu->kvm->lock);
2731         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2732         tr->physical_address = gpa;
2733         tr->valid = gpa != UNMAPPED_GVA;
2734         tr->writeable = 1;
2735         tr->usermode = 0;
2736         mutex_unlock(&vcpu->kvm->lock);
2737         vcpu_put(vcpu);
2738
2739         return 0;
2740 }
2741
2742 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2743 {
2744         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2745
2746         vcpu_load(vcpu);
2747
2748         memcpy(fpu->fpr, fxsave->st_space, 128);
2749         fpu->fcw = fxsave->cwd;
2750         fpu->fsw = fxsave->swd;
2751         fpu->ftwx = fxsave->twd;
2752         fpu->last_opcode = fxsave->fop;
2753         fpu->last_ip = fxsave->rip;
2754         fpu->last_dp = fxsave->rdp;
2755         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2756
2757         vcpu_put(vcpu);
2758
2759         return 0;
2760 }
2761
2762 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2763 {
2764         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2765
2766         vcpu_load(vcpu);
2767
2768         memcpy(fxsave->st_space, fpu->fpr, 128);
2769         fxsave->cwd = fpu->fcw;
2770         fxsave->swd = fpu->fsw;
2771         fxsave->twd = fpu->ftwx;
2772         fxsave->fop = fpu->last_opcode;
2773         fxsave->rip = fpu->last_ip;
2774         fxsave->rdp = fpu->last_dp;
2775         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2776
2777         vcpu_put(vcpu);
2778
2779         return 0;
2780 }
2781
2782 void fx_init(struct kvm_vcpu *vcpu)
2783 {
2784         unsigned after_mxcsr_mask;
2785
2786         /* Initialize guest FPU by resetting ours and saving into guest's */
2787         preempt_disable();
2788         fx_save(&vcpu->host_fx_image);
2789         fpu_init();
2790         fx_save(&vcpu->guest_fx_image);
2791         fx_restore(&vcpu->host_fx_image);
2792         preempt_enable();
2793
2794         vcpu->cr0 |= X86_CR0_ET;
2795         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
2796         vcpu->guest_fx_image.mxcsr = 0x1f80;
2797         memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
2798                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
2799 }
2800 EXPORT_SYMBOL_GPL(fx_init);
2801
2802 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
2803 {
2804         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
2805                 return;
2806
2807         vcpu->guest_fpu_loaded = 1;
2808         fx_save(&vcpu->host_fx_image);
2809         fx_restore(&vcpu->guest_fx_image);
2810 }
2811 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
2812
2813 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
2814 {
2815         if (!vcpu->guest_fpu_loaded)
2816                 return;
2817
2818         vcpu->guest_fpu_loaded = 0;
2819         fx_save(&vcpu->guest_fx_image);
2820         fx_restore(&vcpu->host_fx_image);
2821         ++vcpu->stat.fpu_reload;
2822 }
2823 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
2824
2825 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
2826 {
2827         kvm_x86_ops->vcpu_free(vcpu);
2828 }
2829
2830 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2831                                                 unsigned int id)
2832 {
2833         return kvm_x86_ops->vcpu_create(kvm, id);
2834 }
2835
2836 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2837 {
2838         int r;
2839
2840         /* We do fxsave: this must be aligned. */
2841         BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2842
2843         vcpu_load(vcpu);
2844         r = kvm_arch_vcpu_reset(vcpu);
2845         if (r == 0)
2846                 r = kvm_mmu_setup(vcpu);
2847         vcpu_put(vcpu);
2848         if (r < 0)
2849                 goto free_vcpu;
2850
2851         return 0;
2852 free_vcpu:
2853         kvm_x86_ops->vcpu_free(vcpu);
2854         return r;
2855 }
2856
2857 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2858 {
2859         vcpu_load(vcpu);
2860         kvm_mmu_unload(vcpu);
2861         vcpu_put(vcpu);
2862
2863         kvm_x86_ops->vcpu_free(vcpu);
2864 }
2865
2866 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
2867 {
2868         return kvm_x86_ops->vcpu_reset(vcpu);
2869 }
2870
2871 void kvm_arch_hardware_enable(void *garbage)
2872 {
2873         kvm_x86_ops->hardware_enable(garbage);
2874 }
2875
2876 void kvm_arch_hardware_disable(void *garbage)
2877 {
2878         kvm_x86_ops->hardware_disable(garbage);
2879 }
2880
2881 int kvm_arch_hardware_setup(void)
2882 {
2883         return kvm_x86_ops->hardware_setup();
2884 }
2885
2886 void kvm_arch_hardware_unsetup(void)
2887 {
2888         kvm_x86_ops->hardware_unsetup();
2889 }
2890
2891 void kvm_arch_check_processor_compat(void *rtn)
2892 {
2893         kvm_x86_ops->check_processor_compatibility(rtn);
2894 }
2895
2896 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2897 {
2898         struct page *page;
2899         struct kvm *kvm;
2900         int r;
2901
2902         BUG_ON(vcpu->kvm == NULL);
2903         kvm = vcpu->kvm;
2904
2905         vcpu->mmu.root_hpa = INVALID_PAGE;
2906         if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
2907                 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
2908         else
2909                 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
2910
2911         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2912         if (!page) {
2913                 r = -ENOMEM;
2914                 goto fail;
2915         }
2916         vcpu->pio_data = page_address(page);
2917
2918         r = kvm_mmu_create(vcpu);
2919         if (r < 0)
2920                 goto fail_free_pio_data;
2921
2922         if (irqchip_in_kernel(kvm)) {
2923                 r = kvm_create_lapic(vcpu);
2924                 if (r < 0)
2925                         goto fail_mmu_destroy;
2926         }
2927
2928         return 0;
2929
2930 fail_mmu_destroy:
2931         kvm_mmu_destroy(vcpu);
2932 fail_free_pio_data:
2933         free_page((unsigned long)vcpu->pio_data);
2934 fail:
2935         return r;
2936 }
2937
2938 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
2939 {
2940         kvm_free_lapic(vcpu);
2941         kvm_mmu_destroy(vcpu);
2942         free_page((unsigned long)vcpu->pio_data);
2943 }
2944
2945 struct kvm *kvm_arch_create_vm(void)
2946 {
2947         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
2948
2949         if (!kvm)
2950                 return ERR_PTR(-ENOMEM);
2951
2952         INIT_LIST_HEAD(&kvm->active_mmu_pages);
2953
2954         return kvm;
2955 }
2956
2957 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
2958 {
2959         vcpu_load(vcpu);
2960         kvm_mmu_unload(vcpu);
2961         vcpu_put(vcpu);
2962 }
2963
2964 static void kvm_free_vcpus(struct kvm *kvm)
2965 {
2966         unsigned int i;
2967
2968         /*
2969          * Unpin any mmu pages first.
2970          */
2971         for (i = 0; i < KVM_MAX_VCPUS; ++i)
2972                 if (kvm->vcpus[i])
2973                         kvm_unload_vcpu_mmu(kvm->vcpus[i]);
2974         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2975                 if (kvm->vcpus[i]) {
2976                         kvm_arch_vcpu_free(kvm->vcpus[i]);
2977                         kvm->vcpus[i] = NULL;
2978                 }
2979         }
2980
2981 }
2982
2983 void kvm_arch_destroy_vm(struct kvm *kvm)
2984 {
2985         kfree(kvm->vpic);
2986         kfree(kvm->vioapic);
2987         kvm_free_vcpus(kvm);
2988         kvm_free_physmem(kvm);
2989         kfree(kvm);
2990 }
2991
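/*
 * Arch hook for memory slot updates: allocate anonymous backing memory for
 * slots created through the legacy !user_alloc path, recompute the shadow
 * MMU page budget, then write-protect the slot's shadow entries and flush
 * remote TLBs.
 */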
2992 int kvm_arch_set_memory_region(struct kvm *kvm,
2993                                 struct kvm_userspace_memory_region *mem,
2994                                 struct kvm_memory_slot old,
2995                                 int user_alloc)
2996 {
2997         int npages = mem->memory_size >> PAGE_SHIFT;
2998         struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
2999
3000         /* To keep backward compatibility with older userspace,
3001          * x86 needs to handle the !user_alloc case.
3002          */
3003         if (!user_alloc) {
3004                 if (npages && !old.rmap) {
3005                         down_write(&current->mm->mmap_sem);
3006                         memslot->userspace_addr = do_mmap(NULL, 0,
3007                                                      npages * PAGE_SIZE,
3008                                                      PROT_READ | PROT_WRITE,
3009                                                      MAP_SHARED | MAP_ANONYMOUS,
3010                                                      0);
3011                         up_write(&current->mm->mmap_sem);
3012
3013                         if (IS_ERR((void *)memslot->userspace_addr))
3014                                 return PTR_ERR((void *)memslot->userspace_addr);
3015                 } else {
3016                         if (!old.user_alloc && old.rmap) {
3017                                 int ret;
3018
3019                                 down_write(&current->mm->mmap_sem);
3020                                 ret = do_munmap(current->mm, old.userspace_addr,
3021                                                 old.npages * PAGE_SIZE);
3022                                 up_write(&current->mm->mmap_sem);
3023                                 if (ret < 0)
3024                                         printk(KERN_WARNING
3025                                        "kvm_vm_ioctl_set_memory_region: "
3026                                        "failed to munmap memory\n");
3027                         }
3028                 }
3029         }
3030
3031         if (!kvm->n_requested_mmu_pages) {
3032                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3033                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3034         }
3035
3036         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3037         kvm_flush_remote_tlbs(kvm);
3038
3039         return 0;
3040 }