/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86_emulate.h"
#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

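/*
 * Per-vcpu VMX state: the VMCS itself, the guest/host MSR save/restore
 * arrays, and the cached host segment state that must be restored after
 * a VM exit.
 */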
struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        int                   launched;
        struct kvm_msr_entry *guest_msrs;
        struct kvm_msr_entry *host_msrs;
        int                   nmsrs;
        int                   save_nmsrs;
        int                   msr_offset_efer;
#ifdef CONFIG_X86_64
        int                   msr_offset_kernel_gs_base;
#endif
        struct vmcs          *vmcs;
        struct {
                int           loaded;
                u16           fs_sel, gs_sel, ldt_sel;
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
        } host_state;
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)

static struct vmcs_config {
        int size;
        int order;
        u32 revision_id;
        u32 pin_based_exec_ctrl;
        u32 cpu_based_exec_ctrl;
        u32 vmexit_ctrl;
        u32 vmentry_ctrl;
} vmcs_config;

#define VMX_SEGMENT_FIELD(seg)                                  \
        [VCPU_SREG_##seg] = {                                   \
                .selector = GUEST_##seg##_SELECTOR,             \
                .base = GUEST_##seg##_BASE,                     \
                .limit = GUEST_##seg##_LIMIT,                   \
                .ar_bytes = GUEST_##seg##_AR_BYTES,             \
        }

static struct kvm_vmx_segment_field {
        unsigned selector;
        unsigned base;
        unsigned limit;
        unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
        VMX_SEGMENT_FIELD(CS),
        VMX_SEGMENT_FIELD(DS),
        VMX_SEGMENT_FIELD(ES),
        VMX_SEGMENT_FIELD(FS),
        VMX_SEGMENT_FIELD(GS),
        VMX_SEGMENT_FIELD(SS),
        VMX_SEGMENT_FIELD(TR),
        VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
        MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static void load_msrs(struct kvm_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                rdmsrl(e[i].index, e[i].data);
}

static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
{
        return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}

static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
{
        int efer_offset = vmx->msr_offset_efer;
        return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
                msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
}

static inline int is_page_fault(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
        return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
        return (cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm));
}

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
        int i;

        for (i = 0; i < vmx->nmsrs; ++i)
                if (vmx->guest_msrs[i].index == msr)
                        return i;
        return -1;
}

static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
        int i;

        i = __find_msr_index(vmx, msr);
        if (i >= 0)
                return &vmx->guest_msrs[i];
        return NULL;
}

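/*
 * VMCLEAR makes a VMCS inactive and flushes its cached state to memory.
 * It must run on the cpu where the VMCS was last loaded, which is why
 * vcpu_clear() below may bounce to another cpu.
 */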
static void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);
        u8 error;

        asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
                      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                      : "cc", "memory");
        if (error)
                printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
                       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
        struct vcpu_vmx *vmx = arg;
        int cpu = raw_smp_processor_id();

        if (vmx->vcpu.cpu == cpu)
                vmcs_clear(vmx->vmcs);
        if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
                per_cpu(current_vmcs, cpu) = NULL;
        rdtscll(vmx->vcpu.host_tsc);
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
        if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
                smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
                                         vmx, 0, 1);
        else
                __vcpu_clear(vmx);
        vmx->launched = 0;
}

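/*
 * VMCS fields are accessed with the VMREAD/VMWRITE instructions rather
 * than through memory; the width-specific helpers below all funnel
 * through vmcs_readl()/vmcs_writel().
 */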
static unsigned long vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}

static u16 vmcs_read16(unsigned long field)
{
        return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
        return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
        return vmcs_readl(field);
#else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
        printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
               field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
        dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
        u8 error;

        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                      : "=q"(error) : "a"(value), "d"(field) : "cc");
        if (unlikely(error))
                vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
        vmcs_writel(field, value);
#else
        vmcs_writel(field, value);
        asm volatile ("");
        vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
        vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
        vmcs_writel(field, vmcs_readl(field) | mask);
}

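/*
 * Recompute which guest exceptions should force a VM exit: page faults
 * are always intercepted, #NM while the guest FPU is lazily disabled,
 * #DB (vector 1) while the guest is being debugged, and every vector
 * when emulating real mode.
 */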
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
        u32 eb;

        eb = 1u << PF_VECTOR;
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug.enabled)
                eb |= 1u << 1;
        if (vcpu->rmode.active)
                eb = ~0;
        vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
#ifndef CONFIG_X86_64
        /*
         * VT restores TR but not its size.  Useless.
         */
        struct descriptor_table gdt;
        struct segment_descriptor *descs;

        get_gdt(&gdt);
        descs = (void *)gdt.base;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
#endif
}

static void load_transition_efer(struct vcpu_vmx *vmx)
{
        u64 trans_efer;
        int efer_offset = vmx->msr_offset_efer;

        trans_efer = vmx->host_msrs[efer_offset].data;
        trans_efer &= ~EFER_SAVE_RESTORE_BITS;
        trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
        wrmsrl(MSR_EFER, trans_efer);
        vmx->vcpu.stat.efer_reload++;
}

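/*
 * Save the host's fs/gs/ldt selectors and MSRs and load the guest MSRs
 * before entering the guest; vmx_load_host_state() undoes this on the
 * way back out.
 */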
static void vmx_save_host_state(struct vcpu_vmx *vmx)
{
        if (vmx->host_state.loaded)
                return;

        vmx->host_state.loaded = 1;
        /*
         * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
         * allow segment selectors with cpl > 0 or ti == 1.
         */
        vmx->host_state.ldt_sel = read_ldt();
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
        vmx->host_state.fs_sel = read_fs();
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
                vmx->host_state.fs_reload_needed = 0;
        } else {
                vmcs_write16(HOST_FS_SELECTOR, 0);
                vmx->host_state.fs_reload_needed = 1;
        }
        vmx->host_state.gs_sel = read_gs();
        if (!(vmx->host_state.gs_sel & 7))
                vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
        else {
                vmcs_write16(HOST_GS_SELECTOR, 0);
                vmx->host_state.gs_ldt_reload_needed = 1;
        }

#ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
        vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
        vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
                save_msrs(vmx->host_msrs +
                          vmx->msr_offset_kernel_gs_base, 1);
        }
#endif
        load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
        if (msr_efer_need_save_restore(vmx))
                load_transition_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
        unsigned long flags;

        if (!vmx->host_state.loaded)
                return;

        vmx->host_state.loaded = 0;
        if (vmx->host_state.fs_reload_needed)
                load_fs(vmx->host_state.fs_sel);
        if (vmx->host_state.gs_ldt_reload_needed) {
                load_ldt(vmx->host_state.ldt_sel);
                /*
                 * If we have to reload gs, we must take care to
                 * preserve our gs base.
                 */
                local_irq_save(flags);
                load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
                wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
                local_irq_restore(flags);
        }
        reload_tss();
        save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
        load_msrs(vmx->host_msrs, vmx->save_nmsrs);
        if (msr_efer_need_save_restore(vmx))
                load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 phys_addr = __pa(vmx->vmcs);
        u64 tsc_this, delta;

        if (vcpu->cpu != cpu) {
                vcpu_clear(vmx);
                kvm_migrate_apic_timer(vcpu);
        }

        if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
                u8 error;

                per_cpu(current_vmcs, cpu) = vmx->vmcs;
                asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
                              : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                              : "cc");
                if (error)
                        printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
                               vmx->vmcs, phys_addr);
        }

        if (vcpu->cpu != cpu) {
                struct descriptor_table dt;
                unsigned long sysenter_esp;

                vcpu->cpu = cpu;
                /*
                 * Linux uses per-cpu TSS and GDT, so set these when switching
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
                get_gdt(&dt);
                vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

                /*
                 * Make sure the time stamp counter is monotonic.
                 */
                rdtscll(tsc_this);
                delta = vcpu->host_tsc - tsc_this;
                vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
        }
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
        vmx_load_host_state(to_vmx(vcpu));
        kvm_put_guest_fpu(vcpu);
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
        if (vcpu->fpu_active)
                return;
        vcpu->fpu_active = 1;
        vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
        if (vcpu->cr0 & X86_CR0_TS)
                vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
        update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active)
                return;
        vcpu->fpu_active = 0;
        vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
        update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
        vcpu_clear(to_vmx(vcpu));
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
        return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        unsigned long rip;
        u32 interruptibility;

        rip = vmcs_readl(GUEST_RIP);
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        vmcs_writel(GUEST_RIP, rip);

        /*
         * We emulated an instruction, so temporary interrupt blocking
         * should be removed, if set.
         */
        interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
        vcpu->interrupt_window_open = 1;
}

static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
        printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
               vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     GP_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIEVER_CODE_MASK |
                     INTR_INFO_VALID_MASK);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
#ifdef CONFIG_X86_64
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
        struct kvm_msr_entry tmp;

        tmp = vmx->guest_msrs[to];
        vmx->guest_msrs[to] = vmx->guest_msrs[from];
        vmx->guest_msrs[from] = tmp;
        tmp = vmx->host_msrs[to];
        vmx->host_msrs[to] = vmx->host_msrs[from];
        vmx->host_msrs[from] = tmp;
}
#endif

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
        int save_nmsrs;

        save_nmsrs = 0;
#ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
                int index;

                index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
                index = __find_msr_index(vmx, MSR_LSTAR);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
                index = __find_msr_index(vmx, MSR_CSTAR);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
                index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
                /*
                 * MSR_K6_STAR is only needed on long mode guests, and only
                 * if efer.sce is enabled.
                 */
                index = __find_msr_index(vmx, MSR_K6_STAR);
                if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
                        move_msr_up(vmx, index, save_nmsrs++);
        }
#endif
        vmx->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
        vmx->msr_offset_kernel_gs_base =
                __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
        vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
        u64 host_tsc, tsc_offset;

        rdtscll(host_tsc);
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
        u64 host_tsc;

        rdtscll(host_tsc);
        vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        u64 data;
        struct kvm_msr_entry *msr;

        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
                return -EINVAL;
        }

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                data = vmcs_readl(GUEST_FS_BASE);
                break;
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
        case MSR_IA32_TIME_STAMP_COUNTER:
                data = guest_read_tsc();
                break;
        case MSR_IA32_SYSENTER_CS:
                data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
        case MSR_IA32_SYSENTER_EIP:
                data = vmcs_readl(GUEST_SYSENTER_EIP);
                break;
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
                        data = msr->data;
                        break;
                }
                return kvm_get_msr_common(vcpu, msr_index, pdata);
        }

        *pdata = data;
        return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_msr_entry *msr;
        int ret = 0;

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                if (vmx->host_state.loaded)
                        load_transition_efer(vmx);
                break;
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
                break;
        case MSR_IA32_SYSENTER_EIP:
                vmcs_writel(GUEST_SYSENTER_EIP, data);
                break;
        case MSR_IA32_SYSENTER_ESP:
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
        default:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
                        msr->data = data;
                        if (vmx->host_state.loaded)
                                load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);
        }

        return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
        vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
        vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
        vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
        vmcs_writel(GUEST_RIP, vcpu->rip);
}

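/*
 * Program the guest's debug registers from userspace's kvm_debug_guest:
 * dr7 bit 9 selects exact breakpoint matching, bits 2*i globally enable
 * breakpoint i, and a type field of 0 makes it an execution breakpoint.
 */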
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        unsigned long dr7 = 0x400;
        int old_singlestep;

        old_singlestep = vcpu->guest_debug.singlestep;

        vcpu->guest_debug.enabled = dbg->enabled;
        if (vcpu->guest_debug.enabled) {
                int i;

                dr7 |= 0x200;  /* exact */
                for (i = 0; i < 4; ++i) {
                        if (!dbg->breakpoints[i].enabled)
                                continue;
                        vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
                        dr7 |= 2 << (i*2);    /* global enable */
                        dr7 |= 0 << (i*4+16); /* execution breakpoint */
                }

                vcpu->guest_debug.singlestep = dbg->singlestep;
        } else
                vcpu->guest_debug.singlestep = 0;

        if (old_singlestep && !vcpu->guest_debug.singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                vmcs_writel(GUEST_RFLAGS, flags);
        }

        update_exception_bitmap(vcpu);
        vmcs_writel(GUEST_DR7, dr7);

        return 0;
}

static int vmx_get_irq(struct kvm_vcpu *vcpu)
{
        u32 idtv_info_field;

        idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (idtv_info_field & INTR_INFO_VALID_MASK) {
                if (is_external_interrupt(idtv_info_field))
                        return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
                else
                        printk(KERN_DEBUG "pending exception: not handled yet\n");
        }
        return -1;
}

static __init int cpu_has_kvm_support(void)
{
        unsigned long ecx = cpuid_ecx(1);
        return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
                       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
            == MSR_IA32_FEATURE_CONTROL_LOCKED;
        /* locked but not enabled */
}

static void hardware_enable(void *garbage)
{
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
        if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
                    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
            != (MSR_IA32_FEATURE_CONTROL_LOCKED |
                MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
                       MSR_IA32_FEATURE_CONTROL_LOCKED |
                       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
        asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

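/*
 * Intersect the desired execution controls with what the hardware
 * supports: in each VMX capability MSR, a 0 bit in the high word forces
 * the control to 0 and a 1 bit in the low word forces it to 1.
 */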
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
                                      u32 msr, u32 *result)
{
        u32 vmx_msr_low, vmx_msr_high;
        u32 ctl = ctl_min | ctl_opt;

        rdmsr(msr, vmx_msr_low, vmx_msr_high);

        ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
        ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

        /* Ensure minimum (required) set of control bits are supported. */
        if (ctl_min & ~ctl)
                return -EIO;

        *result = ctl;
        return 0;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
        u32 vmx_msr_low, vmx_msr_high;
        u32 min, opt;
        u32 _pin_based_exec_control = 0;
        u32 _cpu_based_exec_control = 0;
        u32 _vmexit_control = 0;
        u32 _vmentry_control = 0;

        min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
        opt = 0;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
                                &_pin_based_exec_control) < 0)
                return -EIO;

        min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
              CPU_BASED_CR8_LOAD_EXITING |
              CPU_BASED_CR8_STORE_EXITING |
#endif
              CPU_BASED_USE_IO_BITMAPS |
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING;
#ifdef CONFIG_X86_64
        opt = CPU_BASED_TPR_SHADOW;
#else
        opt = 0;
#endif
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
                                &_cpu_based_exec_control) < 0)
                return -EIO;
#ifdef CONFIG_X86_64
        if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
                _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
                                           ~CPU_BASED_CR8_STORE_EXITING;
#endif

        min = 0;
#ifdef CONFIG_X86_64
        min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
        opt = 0;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
                                &_vmexit_control) < 0)
                return -EIO;

        min = opt = 0;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
                                &_vmentry_control) < 0)
                return -EIO;

        rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

        /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
        if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
                return -EIO;

#ifdef CONFIG_X86_64
        /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
        if (vmx_msr_high & (1u<<16))
                return -EIO;
#endif

        /* Require Write-Back (WB) memory type for VMCS accesses. */
        if (((vmx_msr_high >> 18) & 15) != 6)
                return -EIO;

        vmcs_conf->size = vmx_msr_high & 0x1fff;
        vmcs_conf->order = get_order(vmcs_conf->size);
        vmcs_conf->revision_id = vmx_msr_low;

        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
        vmcs_conf->vmexit_ctrl         = _vmexit_control;
        vmcs_conf->vmentry_ctrl        = _vmentry_control;

        return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;

        pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_config.size);
        vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
        return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
        return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
        free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct vmcs *vmcs;

                vmcs = alloc_vmcs_cpu(cpu);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
                }

                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
}

static __init int hardware_setup(void)
{
        if (setup_vmcs_config(&vmcs_config) < 0)
                return -EIO;
        return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
        free_kvm_area();
}

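/*
 * On the way back from real mode to protected mode, restore a data
 * segment's saved state if it still looks usable; otherwise synthesize
 * a flat writable data segment (ar_bytes 0x93) at the selector's RPL.
 */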
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
                vmcs_write16(sf->selector, save->selector);
                vmcs_writel(sf->base, save->base);
                vmcs_write32(sf->limit, save->limit);
                vmcs_write32(sf->ar_bytes, save->ar);
        } else {
                u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
                        << AR_DPL_SHIFT;
                vmcs_write32(sf->ar_bytes, 0x93 | dpl);
        }
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 0;

        vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
        vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
        vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
        flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);

        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
                        (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

        update_exception_bitmap(vcpu);

        fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

        vmcs_write16(GUEST_CS_SELECTOR,
                     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static gva_t rmode_tss_base(struct kvm *kvm)
{
        gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
        return base_gfn << PAGE_SHIFT;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        save->selector = vmcs_read16(sf->selector);
        save->base = vmcs_readl(sf->base);
        save->limit = vmcs_read32(sf->limit);
        save->ar = vmcs_read32(sf->ar_bytes);
        vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0xf3);
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 1;

        vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

        vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

        vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        flags = vmcs_readl(GUEST_RFLAGS);
        vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

        flags |= IOPL_MASK | X86_EFLAGS_VM;

        vmcs_writel(GUEST_RFLAGS, flags);
        vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
        update_exception_bitmap(vcpu);

        vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
        vmcs_write32(GUEST_SS_LIMIT, 0xffff);
        vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

        vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

        fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);

        init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
        u32 guest_tr_ar;

        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
                       __FUNCTION__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }

        vcpu->shadow_efer |= EFER_LMA;

        find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     | VM_ENTRY_IA32E_MODE);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
        vcpu->shadow_efer &= ~EFER_LMA;

        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_IA32E_MODE);
}

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
        vcpu->cr4 &= KVM_GUEST_CR4_MASK;
        vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        vmx_fpu_deactivate(vcpu);

        if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
                enter_pmode(vcpu);

        if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
                enter_rmode(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
                        exit_lmode(vcpu);
        }
#endif

        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
        vcpu->cr0 = cr0;

        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
                vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        vmcs_writel(GUEST_CR3, cr3);
        if (vcpu->cr0 & X86_CR0_PE)
                vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

        vcpu->shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) |
                             VM_ENTRY_IA32E_MODE);
                msr->data = efer;
        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) &
                             ~VM_ENTRY_IA32E_MODE);

                msr->data = efer & ~EFER_LME;
        }
        setup_msrs(vmx);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
        if (ar & AR_UNUSABLE_MASK)
                ar = 0;
        var->type = ar & 15;
        var->s = (ar >> 4) & 1;
        var->dpl = (ar >> 5) & 3;
        var->present = (ar >> 7) & 1;
        var->avl = (ar >> 12) & 1;
        var->l = (ar >> 13) & 1;
        var->db = (ar >> 14) & 1;
        var->g = (ar >> 15) & 1;
        var->unusable = (ar >> 16) & 1;
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
        u32 ar;

        if (var->unusable)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
                ar |= (var->s & 1) << 4;
                ar |= (var->dpl & 3) << 5;
                ar |= (var->present & 1) << 7;
                ar |= (var->avl & 1) << 12;
                ar |= (var->l & 1) << 13;
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
        if (ar == 0) /* a 0 value means unusable */
                ar = AR_UNUSABLE_MASK;

        return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
                vcpu->rmode.tr.selector = var->selector;
                vcpu->rmode.tr.base = var->base;
                vcpu->rmode.tr.limit = var->limit;
                vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
                return;
        }
        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
        if (vcpu->rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
                if (var->base == 0xffff0000 && var->selector == 0xf000)
                        vmcs_writel(sf->base, 0xf0000);
                ar = 0xf3;
        } else
                ar = vmx_segment_access_rights(var);
        vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

        *db = (ar >> 14) & 1;
        *l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

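/*
 * Lay out the real-mode TSS in the last three pages of the first memory
 * slot: the I/O bitmap offset goes at 0x66 of the first page and the
 * bitmap's terminating 0xff byte fills the last byte of the third page.
 */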
static int init_rmode_tss(struct kvm *kvm)
{
        struct page *p1, *p2, *p3;
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        char *page;

        p1 = gfn_to_page(kvm, fn++);
        p2 = gfn_to_page(kvm, fn++);
        p3 = gfn_to_page(kvm, fn);

        if (!p1 || !p2 || !p3) {
                kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
                return 0;
        }

        page = kmap_atomic(p1, KM_USER0);
        clear_page(page);
        *(u16 *)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p2, KM_USER0);
        clear_page(page);
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p3, KM_USER0);
        clear_page(page);
        *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
        kunmap_atomic(page, KM_USER0);

        return 1;
}

static void seg_setup(int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0x93);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
        u32 host_sysenter_cs;
        u32 junk;
        unsigned long a;
        struct descriptor_table dt;
        int i;
        int ret = 0;
        unsigned long kvm_vmx_return;
        u64 msr;
        u32 exec_control;

        if (!init_rmode_tss(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
                goto out;
        }

        vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        set_cr8(&vmx->vcpu, 0);
        msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (vmx->vcpu.vcpu_id == 0)
                msr |= MSR_IA32_APICBASE_BSP;
        kvm_set_apic_base(&vmx->vcpu, msr);

        fx_init(&vmx->vcpu);

        /*
         * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
         * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
         */
        vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
        vmcs_writel(GUEST_CS_BASE, 0x000f0000);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

        seg_setup(VCPU_SREG_DS);
        seg_setup(VCPU_SREG_ES);
        seg_setup(VCPU_SREG_FS);
        seg_setup(VCPU_SREG_GS);
        seg_setup(VCPU_SREG_SS);

        vmcs_write16(GUEST_TR_SELECTOR, 0);
        vmcs_writel(GUEST_TR_BASE, 0);
        vmcs_write32(GUEST_TR_LIMIT, 0xffff);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        vmcs_write16(GUEST_LDTR_SELECTOR, 0);
        vmcs_writel(GUEST_LDTR_BASE, 0);
        vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
        vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

        vmcs_write32(GUEST_SYSENTER_CS, 0);
        vmcs_writel(GUEST_SYSENTER_ESP, 0);
        vmcs_writel(GUEST_SYSENTER_EIP, 0);

        vmcs_writel(GUEST_RFLAGS, 0x02);
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);

        /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
        vmcs_writel(GUEST_DR7, 0x400);

        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

        vmcs_writel(GUEST_IDTR_BASE, 0);
        vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

        vmcs_write32(GUEST_ACTIVITY_STATE, 0);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

        /* I/O */
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

        guest_write_tsc(0);

        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

        /* Control */
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
                     vmcs_config.pin_based_exec_ctrl);

        exec_control = vmcs_config.cpu_based_exec_ctrl;
        if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
                exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
                exec_control |= CPU_BASED_CR8_STORE_EXITING |
                                CPU_BASED_CR8_LOAD_EXITING;
#endif
        }
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

        vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
        vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
        vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
        vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

        asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        rdmsrl(MSR_IA32_SYSENTER_ESP, a);
        vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                u64 data;
                int j = vmx->nmsrs;

                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
                vmx->host_msrs[j].index = index;
                vmx->host_msrs[j].reserved = 0;
                vmx->host_msrs[j].data = data;
                vmx->guest_msrs[j] = vmx->host_msrs[j];
                ++vmx->nmsrs;
        }

        setup_msrs(vmx);

        vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

        /* 22.2.1, 20.8.1 */
        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
        if (vm_need_tpr_shadow(vmx->vcpu.kvm))
                vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
                             page_to_phys(vmx->vcpu.apic->regs_page));
        vmcs_write32(TPR_THRESHOLD, 0);
#endif

        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        vmx->vcpu.cr0 = 0x60000010;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
        vmx_set_efer(&vmx->vcpu, 0);
#endif
        vmx_fpu_activate(&vmx->vcpu);
        update_exception_bitmap(&vmx->vcpu);

        return 0;

out:
        return ret;
}

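/*
 * Real-mode interrupt injection: emulate the cpu's vectoring by pushing
 * flags, cs and ip on the guest stack and jumping through the IVT entry
 * for 'irq'.  VM-entry event injection would instead vector through the
 * protected-mode IDT, which is wrong for the vm86 real-mode hack.
 */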
1590 static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
1591 {
1592         u16 ent[2];
1593         u16 cs;
1594         u16 ip;
1595         unsigned long flags;
1596         unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
1597         u16 sp =  vmcs_readl(GUEST_RSP);
1598         u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
1599
1600         if (sp > ss_limit || sp < 6 ) {
1601                 vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
1602                             __FUNCTION__,
1603                             vmcs_readl(GUEST_RSP),
1604                             vmcs_readl(GUEST_SS_BASE),
1605                             vmcs_read32(GUEST_SS_LIMIT));
1606                 return;
1607         }
1608
1609         if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
1610                                                         X86EMUL_CONTINUE) {
1611                 vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
1612                 return;
1613         }
1614
1615         flags =  vmcs_readl(GUEST_RFLAGS);
1616         cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
1617         ip =  vmcs_readl(GUEST_RIP);
1618
1619
1620         if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
1621             emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
1622             emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
1623                 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
1624                 return;
1625         }
1626
1627         vmcs_writel(GUEST_RFLAGS, flags &
1628                     ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
1629         vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
1630         vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
1631         vmcs_writel(GUEST_RIP, ent[0]);
1632         vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
1633 }
1634
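/*
 * In protected mode the interrupt is queued through the VM-entry
 * interruption-information field (vector in the low byte, external-
 * interrupt type, valid bit) and delivered by hardware on the next
 * entry; real mode (run as vm86) goes through the software path above.
 */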
1635 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
1636 {
1637         if (vcpu->rmode.active) {
1638                 inject_rmode_irq(vcpu, irq);
1639                 return;
1640         }
1641         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
1642                         irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1643 }
1644
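/*
 * Pending interrupts live in a two-level bitmap: irq_summary has one
 * bit per word of irq_pending.  Pick the lowest pending vector, clear
 * it (and its summary bit if the word drained), and inject it.
 */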
1645 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1646 {
1647         int word_index = __ffs(vcpu->irq_summary);
1648         int bit_index = __ffs(vcpu->irq_pending[word_index]);
1649         int irq = word_index * BITS_PER_LONG + bit_index;
1650
1651         clear_bit(bit_index, &vcpu->irq_pending[word_index]);
1652         if (!vcpu->irq_pending[word_index])
1653                 clear_bit(word_index, &vcpu->irq_summary);
1654         vmx_inject_irq(vcpu, irq);
1655 }
1656
1657
1658 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1659                                        struct kvm_run *kvm_run)
1660 {
1661         u32 cpu_based_vm_exec_control;
1662
1663         vcpu->interrupt_window_open =
1664                 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1665                  (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1666
1667         if (vcpu->interrupt_window_open &&
1668             vcpu->irq_summary &&
1669             !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1670                 /*
1671                  * Interrupts are enabled and not blocked by a sti or
1672                  * mov ss shadow, so inject one now.
1672                  */
1673                 kvm_do_inject_irq(vcpu);
1674
1675         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1676         if (!vcpu->interrupt_window_open &&
1677             (vcpu->irq_summary || kvm_run->request_interrupt_window))
1678                 /*
1679                  * Interrupts blocked.  Wait for unblock.
1680                  */
1681                 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1682         else
1683                 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1684         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
1685 }
1686
1687 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
1688 {
1689         struct kvm_guest_debug *dbg = &vcpu->guest_debug;
1690
1691         set_debugreg(dbg->bp[0], 0);
1692         set_debugreg(dbg->bp[1], 1);
1693         set_debugreg(dbg->bp[2], 2);
1694         set_debugreg(dbg->bp[3], 3);
1695
1696         if (dbg->singlestep) {
1697                 unsigned long flags;
1698
1699                 flags = vmcs_readl(GUEST_RFLAGS);
1700                 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1701                 vmcs_writel(GUEST_RFLAGS, flags);
1702         }
1703 }
1704
1705 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1706                                   int vec, u32 err_code)
1707 {
1708         if (!vcpu->rmode.active)
1709                 return 0;
1710
1711         /*
1712          * An instruction with the address-size override prefix (0x67)
1713          * causes a #SS fault with error code 0 in VM86 mode.
1714          */
1715         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
1716                 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
1717                         return 1;
1718         return 0;
1719 }
1720
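/*
 * Exits for exceptions and NMIs funnel through here: external
 * interrupts received while vectoring are re-queued (when there is no
 * in-kernel irqchip), NMIs are reflected to the host via "int $2",
 * #NM triggers lazy FPU activation, page faults go to the shadow MMU
 * (falling back to the emulator), and anything unhandled is reported
 * to userspace as KVM_EXIT_EXCEPTION.
 */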
1721 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1722 {
1723         u32 intr_info, error_code;
1724         unsigned long cr2, rip;
1725         u32 vect_info;
1726         enum emulation_result er;
1727         int r;
1728
1729         vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1730         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1731
1732         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
1733                                                 !is_page_fault(intr_info)) {
1734                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
1735                        "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
1736         }
1737
1738         if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
1739                 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1740                 set_bit(irq, vcpu->irq_pending);
1741                 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
1742         }
1743
1744         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* NMI */
1745                 asm ("int $2");
1746                 return 1;
1747         }
1748
1749         if (is_no_device(intr_info)) {
1750                 vmx_fpu_activate(vcpu);
1751                 return 1;
1752         }
1753
1754         error_code = 0;
1755         rip = vmcs_readl(GUEST_RIP);
1756         if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
1757                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1758         if (is_page_fault(intr_info)) {
1759                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1760
1761                 mutex_lock(&vcpu->kvm->lock);
1762                 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1763                 if (r < 0) {
1764                         mutex_unlock(&vcpu->kvm->lock);
1765                         return r;
1766                 }
1767                 if (!r) {
1768                         mutex_unlock(&vcpu->kvm->lock);
1769                         return 1;
1770                 }
1771
1772                 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
1773                 mutex_unlock(&vcpu->kvm->lock);
1774
1775                 switch (er) {
1776                 case EMULATE_DONE:
1777                         return 1;
1778                 case EMULATE_DO_MMIO:
1779                         ++vcpu->stat.mmio_exits;
1780                         return 0;
1781                 case EMULATE_FAIL:
1782                         vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
1783                         break;
1784                 default:
1785                         BUG();
1786                 }
1787         }
1788
1789         if (vcpu->rmode.active &&
1790             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1791                                                                 error_code)) {
1792                 if (vcpu->halt_request) {
1793                         vcpu->halt_request = 0;
1794                         return kvm_emulate_halt(vcpu);
1795                 }
1796                 return 1;
1797         }
1798
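        /*
         * Vector 1 with the exception type set is a debug exception
         * (#DB); hand it to userspace as a KVM_EXIT_DEBUG.
         */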
1799         if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
1800                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1801                 return 0;
1802         }
1803         kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1804         kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1805         kvm_run->ex.error_code = error_code;
1806         return 0;
1807 }
1808
1809 static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1810                                      struct kvm_run *kvm_run)
1811 {
1812         ++vcpu->stat.irq_exits;
1813         return 1;
1814 }
1815
1816 static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1817 {
1818         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1819         return 0;
1820 }
1821
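/*
 * Exit qualification layout for I/O instructions: bits 2:0 hold the
 * access size minus one, bit 3 the direction (1 = in), bit 4 the
 * string-instruction flag, bit 5 the REP prefix, and bits 31:16 the
 * port number.
 */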
1822 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1823 {
1824         u64 exit_qualification;
1825         int size, down, in, string, rep;
1826         unsigned port;
1827
1828         ++vcpu->stat.io_exits;
1829         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1830         string = (exit_qualification & 16) != 0;
1831
1832         if (string) {
1833                 if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
1834                         return 0;
1835                 return 1;
1836         }
1837
1838         size = (exit_qualification & 7) + 1;
1839         in = (exit_qualification & 8) != 0;
1840         down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1841         rep = (exit_qualification & 32) != 0;
1842         port = exit_qualification >> 16;
1843
1844         return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
1845 }
1846
1847 static void
1848 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1849 {
1850         /*
1851          * Patch in the VMCALL instruction:
1852          */
1853         hypercall[0] = 0x0f;
1854         hypercall[1] = 0x01;
1855         hypercall[2] = 0xc1;    /* 0f 01 c1 = vmcall */
1856         hypercall[3] = 0xc3;    /* ret */
1857 }
1858
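/*
 * For CR accesses the exit qualification encodes the operation:
 * bits 3:0 are the control register number, bits 5:4 the access type
 * (0 = mov to cr, 1 = mov from cr, 2 = clts, 3 = lmsw) and bits 11:8
 * the general-purpose register involved.
 */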
1859 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1860 {
1861         u64 exit_qualification;
1862         int cr;
1863         int reg;
1864
1865         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1866         cr = exit_qualification & 15;
1867         reg = (exit_qualification >> 8) & 15;
1868         switch ((exit_qualification >> 4) & 3) {
1869         case 0: /* mov to cr */
1870                 switch (cr) {
1871                 case 0:
1872                         vcpu_load_rsp_rip(vcpu);
1873                         set_cr0(vcpu, vcpu->regs[reg]);
1874                         skip_emulated_instruction(vcpu);
1875                         return 1;
1876                 case 3:
1877                         vcpu_load_rsp_rip(vcpu);
1878                         set_cr3(vcpu, vcpu->regs[reg]);
1879                         skip_emulated_instruction(vcpu);
1880                         return 1;
1881                 case 4:
1882                         vcpu_load_rsp_rip(vcpu);
1883                         set_cr4(vcpu, vcpu->regs[reg]);
1884                         skip_emulated_instruction(vcpu);
1885                         return 1;
1886                 case 8:
1887                         vcpu_load_rsp_rip(vcpu);
1888                         set_cr8(vcpu, vcpu->regs[reg]);
1889                         skip_emulated_instruction(vcpu);
1890                         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1891                         return 0;
1892                 }
1893                 break;
1894         case 2: /* clts */
1895                 vcpu_load_rsp_rip(vcpu);
1896                 vmx_fpu_deactivate(vcpu);
1897                 vcpu->cr0 &= ~X86_CR0_TS;
1898                 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1899                 vmx_fpu_activate(vcpu);
1900                 skip_emulated_instruction(vcpu);
1901                 return 1;
1902         case 1: /* mov from cr */
1903                 switch (cr) {
1904                 case 3:
1905                         vcpu_load_rsp_rip(vcpu);
1906                         vcpu->regs[reg] = vcpu->cr3;
1907                         vcpu_put_rsp_rip(vcpu);
1908                         skip_emulated_instruction(vcpu);
1909                         return 1;
1910                 case 8:
1911                         vcpu_load_rsp_rip(vcpu);
1912                         vcpu->regs[reg] = get_cr8(vcpu);
1913                         vcpu_put_rsp_rip(vcpu);
1914                         skip_emulated_instruction(vcpu);
1915                         return 1;
1916                 }
1917                 break;
1918         case 3: /* lmsw */
1919                 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1920
1921                 skip_emulated_instruction(vcpu);
1922                 return 1;
1923         default:
1924                 break;
1925         }
1926         kvm_run->exit_reason = 0;
1927         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
1928                (int)(exit_qualification >> 4) & 3, cr);
1929         return 0;
1930 }
1931
1932 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1933 {
1934         u64 exit_qualification;
1935         unsigned long val;
1936         int dr, reg;
1937
1938         /*
1939          * FIXME: this code assumes the host is debugging the guest.
1940          *        need to deal with guest debugging itself too.
1941          */
1942         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1943         dr = exit_qualification & 7;
1944         reg = (exit_qualification >> 8) & 15;
1945         vcpu_load_rsp_rip(vcpu);
1946         if (exit_qualification & 16) {
1947                 /* mov from dr */
1948                 switch (dr) {
1949                 case 6:
1950                         val = 0xffff0ff0;       /* DR6 reset value */
1951                         break;
1952                 case 7:
1953                         val = 0x400;            /* DR7 reset value */
1954                         break;
1955                 default:
1956                         val = 0;
1957                 }
1958                 vcpu->regs[reg] = val;
1959         } else {
1960                 /* mov to dr: ignored (see FIXME above) */
1961         }
1962         vcpu_put_rsp_rip(vcpu);
1963         skip_emulated_instruction(vcpu);
1964         return 1;
1965 }
1966
1967 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1968 {
1969         kvm_emulate_cpuid(vcpu);
1970         return 1;
1971 }
1972
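/*
 * RDMSR: ECX selects the MSR; the 64-bit result is returned split
 * across EDX:EAX.  WRMSR below takes its operand the same way.
 */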
1973 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1974 {
1975         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1976         u64 data;
1977
1978         if (vmx_get_msr(vcpu, ecx, &data)) {
1979                 vmx_inject_gp(vcpu, 0);
1980                 return 1;
1981         }
1982
1983         /* FIXME: handling of bits 32:63 of rax, rdx */
1984         vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1985         vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1986         skip_emulated_instruction(vcpu);
1987         return 1;
1988 }
1989
1990 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1991 {
1992         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1993         u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1994                 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1995
1996         if (vmx_set_msr(vcpu, ecx, data) != 0) {
1997                 vmx_inject_gp(vcpu, 0);
1998                 return 1;
1999         }
2000
2001         skip_emulated_instruction(vcpu);
2002         return 1;
2003 }
2004
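/*
 * A TPR-below-threshold exit needs no handling of its own; returning
 * to the run loop is sufficient, as vmx_intr_assist() will pick up any
 * interrupt that the lowered TPR has made deliverable.
 */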
2005 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
2006                                       struct kvm_run *kvm_run)
2007 {
2008         return 1;
2009 }
2010
2011 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2012                               struct kvm_run *kvm_run)
2013 {
2014         kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
2015         kvm_run->cr8 = get_cr8(vcpu);
2016         kvm_run->apic_base = kvm_get_apic_base(vcpu);
2017         if (irqchip_in_kernel(vcpu->kvm))
2018                 kvm_run->ready_for_interrupt_injection = 1;
2019         else
2020                 kvm_run->ready_for_interrupt_injection =
2021                                         (vcpu->interrupt_window_open &&
2022                                          vcpu->irq_summary == 0);
2023 }
2024
2025 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2026                                    struct kvm_run *kvm_run)
2027 {
2028         u32 cpu_based_vm_exec_control;
2029
2030         /* clear pending irq */
2031         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2032         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2033         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2034         /*
2035          * If userspace is waiting to inject an interrupt itself, exit
2036          * to userspace as soon as possible.
2037          */
2038         if (kvm_run->request_interrupt_window &&
2039             !vcpu->irq_summary) {
2040                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2041                 ++vcpu->stat.irq_window_exits;
2042                 return 0;
2043         }
2044         return 1;
2045 }
2046
2047 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2048 {
2049         skip_emulated_instruction(vcpu);
2050         return kvm_emulate_halt(vcpu);
2051 }
2052
2053 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2054 {
2055         skip_emulated_instruction(vcpu);
2056         return kvm_hypercall(vcpu, kvm_run);
2057 }
2058
2059 /*
2060  * The exit handlers return 1 if the exit was handled fully and guest execution
2061  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
2062  * to be done to userspace and return 0.
2063  */
2064 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
2065                                       struct kvm_run *kvm_run) = {
2066         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
2067         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
2068         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
2069         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
2070         [EXIT_REASON_CR_ACCESS]               = handle_cr,
2071         [EXIT_REASON_DR_ACCESS]               = handle_dr,
2072         [EXIT_REASON_CPUID]                   = handle_cpuid,
2073         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
2074         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
2075         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
2076         [EXIT_REASON_HLT]                     = handle_halt,
2077         [EXIT_REASON_VMCALL]                  = handle_vmcall,
2078         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold
2079 };
2080
2081 static const int kvm_vmx_max_exit_handlers =
2082         ARRAY_SIZE(kvm_vmx_exit_handlers);
2083
2084 /*
2085  * The guest has exited.  See if we can fix it or if we need userspace
2086  * assistance.
2087  */
2088 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2089 {
2090         u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2091         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
2092
2093         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
2094                                 exit_reason != EXIT_REASON_EXCEPTION_NMI)
2095                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
2096                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
2097         if (exit_reason < kvm_vmx_max_exit_handlers
2098             && kvm_vmx_exit_handlers[exit_reason])
2099                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
2100         else {
2101                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2102                 kvm_run->hw.hardware_exit_reason = exit_reason;
2103         }
2104         return 0;
2105 }
2106
2107 /*
2108  * Check if userspace requested an interrupt window, and that the
2109  * interrupt window is open.
2110  *
2111  * No need to exit to userspace if we already have an interrupt queued.
2112  */
2113 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2114                                           struct kvm_run *kvm_run)
2115 {
2116         return (!vcpu->irq_summary &&
2117                 kvm_run->request_interrupt_window &&
2118                 vcpu->interrupt_window_open &&
2119                 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
2120 }
2121
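/*
 * No explicit work is needed here: with shadow paging the MMU code
 * handles invalidation, and pre-VPID VT-x flushes guest linear
 * mappings on every VMX transition anyway.
 */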
2122 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2123 {
2124 }
2125
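/*
 * TPR_THRESHOLD holds bits 7:4 of the task priority; the CPU takes a
 * TPR-below-threshold exit when a guest CR8 write drops the TPR under
 * it.  Setting it to the lower of the current TPR class and the class
 * of the highest pending interrupt makes the exit fire just as that
 * interrupt becomes deliverable.
 */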
2126 static void update_tpr_threshold(struct kvm_vcpu *vcpu)
2127 {
2128         int max_irr, tpr;
2129
2130         if (!vm_need_tpr_shadow(vcpu->kvm))
2131                 return;
2132
2133         if (!kvm_lapic_enabled(vcpu) ||
2134             ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
2135                 vmcs_write32(TPR_THRESHOLD, 0);
2136                 return;
2137         }
2138
2139         tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
2140         vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
2141 }
2142
2143 static void enable_irq_window(struct kvm_vcpu *vcpu)
2144 {
2145         u32 cpu_based_vm_exec_control;
2146
2147         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2148         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2149         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2150 }
2151
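/*
 * Decide what event, if any, to inject on the next entry:
 *  - if the entry interruption field is already valid, leave it alone
 *    and just ask for an interrupt-window exit if more irqs are
 *    pending;
 *  - if the last exit interrupted event delivery (IDT-vectoring info
 *    valid), re-inject that event together with its instruction
 *    length and error code;
 *  - otherwise inject the highest-priority pending interrupt if the
 *    guest can take it now, else request an interrupt-window exit.
 */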
2152 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
2153 {
2154         u32 idtv_info_field, intr_info_field;
2155         int has_ext_irq, interrupt_window_open;
2156         int vector;
2157
2158         kvm_inject_pending_timer_irqs(vcpu);
2159         update_tpr_threshold(vcpu);
2160
2161         has_ext_irq = kvm_cpu_has_interrupt(vcpu);
2162         intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
2163         idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2164         if (intr_info_field & INTR_INFO_VALID_MASK) {
2165                 if (idtv_info_field & INTR_INFO_VALID_MASK) {
2166                         /* TODO: fault when IDT_Vectoring */
2167                         printk(KERN_ERR "Fault when IDT_Vectoring\n");
2168                 }
2169                 if (has_ext_irq)
2170                         enable_irq_window(vcpu);
2171                 return;
2172         }
2173         if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
2174                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
2175                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2176                                 vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
2177
2178                 if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
2179                         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2180                                 vmcs_read32(IDT_VECTORING_ERROR_CODE));
2181                 if (unlikely(has_ext_irq))
2182                         enable_irq_window(vcpu);
2183                 return;
2184         }
2185         if (!has_ext_irq)
2186                 return;
2187         interrupt_window_open =
2188                 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2189                  (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
2190         if (interrupt_window_open) {
2191                 vector = kvm_cpu_get_interrupt(vcpu);
2192                 vmx_inject_irq(vcpu, vector);
2193                 kvm_timer_intr_post(vcpu, vector);
2194         } else
2195                 enable_irq_window(vcpu);
2196 }
2197
2198 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2199 {
2200         struct vcpu_vmx *vmx = to_vmx(vcpu);
2201         u8 fail;
2202         int r;
2203
2204 preempted:
2205         if (vcpu->guest_debug.enabled)
2206                 kvm_guest_debug_pre(vcpu);
2207
2208 again:
2209         r = kvm_mmu_reload(vcpu);
2210         if (unlikely(r))
2211                 goto out;
2212
2213         preempt_disable();
2214
2215         vmx_save_host_state(vmx);
2216         kvm_load_guest_fpu(vcpu);
2217
2218         /*
2219          * Loading guest fpu may have cleared host cr0.ts
2220          */
2221         vmcs_writel(HOST_CR0, read_cr0());
2222
2223         local_irq_disable();
2224
2225         if (signal_pending(current)) {
2226                 local_irq_enable();
2227                 preempt_enable();
2228                 r = -EINTR;
2229                 kvm_run->exit_reason = KVM_EXIT_INTR;
2230                 ++vcpu->stat.signal_exits;
2231                 goto out;
2232         }
2233
2234         if (irqchip_in_kernel(vcpu->kvm))
2235                 vmx_intr_assist(vcpu);
2236         else if (!vcpu->mmio_read_completed)
2237                 do_interrupt_requests(vcpu, kvm_run);
2238
2239         vcpu->guest_mode = 1;
2240         if (vcpu->requests)
2241                 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
2242                         vmx_flush_tlb(vcpu);
2243
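        /*
         * The world switch: save the host GPRs, record the current
         * stack pointer in the VMCS HOST_RSP field, load the guest
         * GPRs and CR2, then VMLAUNCH on first entry or VMRESUME
         * afterwards.  On vmexit, control resumes at .Lkvm_vmx_return,
         * where the guest registers are written back to the vcpu and
         * the host registers are restored.  "setbe %0" latches a
         * failed vmentry (CF or ZF set) into 'fail'.
         */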
2244         asm (
2245                 /* Store host registers */
2246 #ifdef CONFIG_X86_64
2247                 "push %%rax; push %%rbx; push %%rdx;"
2248                 "push %%rsi; push %%rdi; push %%rbp;"
2249                 "push %%r8;  push %%r9;  push %%r10; push %%r11;"
2250                 "push %%r12; push %%r13; push %%r14; push %%r15;"
2251                 "push %%rcx \n\t"
2252                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2253 #else
2254                 "pusha; push %%ecx \n\t"
2255                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2256 #endif
2257                 /* Check if vmlaunch or vmresume is needed */
2258                 "cmp $0, %1 \n\t"
2259                 /* Load guest registers.  Don't clobber flags. */
2260 #ifdef CONFIG_X86_64
2261                 "mov %c[cr2](%3), %%rax \n\t"
2262                 "mov %%rax, %%cr2 \n\t"
2263                 "mov %c[rax](%3), %%rax \n\t"
2264                 "mov %c[rbx](%3), %%rbx \n\t"
2265                 "mov %c[rdx](%3), %%rdx \n\t"
2266                 "mov %c[rsi](%3), %%rsi \n\t"
2267                 "mov %c[rdi](%3), %%rdi \n\t"
2268                 "mov %c[rbp](%3), %%rbp \n\t"
2269                 "mov %c[r8](%3),  %%r8  \n\t"
2270                 "mov %c[r9](%3),  %%r9  \n\t"
2271                 "mov %c[r10](%3), %%r10 \n\t"
2272                 "mov %c[r11](%3), %%r11 \n\t"
2273                 "mov %c[r12](%3), %%r12 \n\t"
2274                 "mov %c[r13](%3), %%r13 \n\t"
2275                 "mov %c[r14](%3), %%r14 \n\t"
2276                 "mov %c[r15](%3), %%r15 \n\t"
2277                 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
2278 #else
2279                 "mov %c[cr2](%3), %%eax \n\t"
2280                 "mov %%eax,   %%cr2 \n\t"
2281                 "mov %c[rax](%3), %%eax \n\t"
2282                 "mov %c[rbx](%3), %%ebx \n\t"
2283                 "mov %c[rdx](%3), %%edx \n\t"
2284                 "mov %c[rsi](%3), %%esi \n\t"
2285                 "mov %c[rdi](%3), %%edi \n\t"
2286                 "mov %c[rbp](%3), %%ebp \n\t"
2287                 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
2288 #endif
2289                 /* Enter guest mode */
2290                 "jne .Llaunched \n\t"
2291                 ASM_VMX_VMLAUNCH "\n\t"
2292                 "jmp .Lkvm_vmx_return \n\t"
2293                 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2294                 ".Lkvm_vmx_return: "
2295                 /* Save guest registers, load host registers, keep flags */
2296 #ifdef CONFIG_X86_64
2297                 "xchg %3,     (%%rsp) \n\t"
2298                 "mov %%rax, %c[rax](%3) \n\t"
2299                 "mov %%rbx, %c[rbx](%3) \n\t"
2300                 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
2301                 "mov %%rdx, %c[rdx](%3) \n\t"
2302                 "mov %%rsi, %c[rsi](%3) \n\t"
2303                 "mov %%rdi, %c[rdi](%3) \n\t"
2304                 "mov %%rbp, %c[rbp](%3) \n\t"
2305                 "mov %%r8,  %c[r8](%3) \n\t"
2306                 "mov %%r9,  %c[r9](%3) \n\t"
2307                 "mov %%r10, %c[r10](%3) \n\t"
2308                 "mov %%r11, %c[r11](%3) \n\t"
2309                 "mov %%r12, %c[r12](%3) \n\t"
2310                 "mov %%r13, %c[r13](%3) \n\t"
2311                 "mov %%r14, %c[r14](%3) \n\t"
2312                 "mov %%r15, %c[r15](%3) \n\t"
2313                 "mov %%cr2, %%rax   \n\t"
2314                 "mov %%rax, %c[cr2](%3) \n\t"
2315                 "mov (%%rsp), %3 \n\t"
2316
2317                 "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
2318                 "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
2319                 "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
2320                 "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
2321 #else
2322                 "xchg %3, (%%esp) \n\t"
2323                 "mov %%eax, %c[rax](%3) \n\t"
2324                 "mov %%ebx, %c[rbx](%3) \n\t"
2325                 "pushl (%%esp); popl %c[rcx](%3) \n\t"
2326                 "mov %%edx, %c[rdx](%3) \n\t"
2327                 "mov %%esi, %c[rsi](%3) \n\t"
2328                 "mov %%edi, %c[rdi](%3) \n\t"
2329                 "mov %%ebp, %c[rbp](%3) \n\t"
2330                 "mov %%cr2, %%eax  \n\t"
2331                 "mov %%eax, %c[cr2](%3) \n\t"
2332                 "mov (%%esp), %3 \n\t"
2333
2334                 "pop %%ecx; popa \n\t"
2335 #endif
2336                 "setbe %0 \n\t"
2337               : "=q" (fail)
2338               : "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
2339                 "c"(vcpu),
2340                 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
2341                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
2342                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
2343                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
2344                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
2345                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
2346                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
2347 #ifdef CONFIG_X86_64
2348                 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
2349                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
2350                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
2351                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
2352                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
2353                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
2354                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
2355                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
2356 #endif
2357                 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
2358               : "cc", "memory" );
2359
2360         vcpu->guest_mode = 0;
2361         local_irq_enable();
2362
2363         ++vcpu->stat.exits;
2364
2365         vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2366
2367         asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2368         vmx->launched = 1;
2369
2370         preempt_enable();
2371
2372         if (unlikely(fail)) {
2373                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2374                 kvm_run->fail_entry.hardware_entry_failure_reason
2375                         = vmcs_read32(VM_INSTRUCTION_ERROR);
2376                 r = 0;
2377                 goto out;
2378         }
2379         /*
2380          * Profile KVM exit RIPs:
2381          */
2382         if (unlikely(prof_on == KVM_PROFILING))
2383                 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
2384
2385         r = kvm_handle_exit(kvm_run, vcpu);
2386         if (r > 0) {
2387                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2388                         r = -EINTR;
2389                         kvm_run->exit_reason = KVM_EXIT_INTR;
2390                         ++vcpu->stat.request_irq_exits;
2391                         goto out;
2392                 }
2393                 if (!need_resched()) {
2394                         ++vcpu->stat.light_exits;
2395                         goto again;
2396                 }
2397         }
2398
2399 out:
2400         if (r > 0) {
2401                 kvm_resched(vcpu);
2402                 goto preempted;
2403         }
2404
2405         post_kvm_run_save(vcpu, kvm_run);
2406         return r;
2407 }
2408
2409 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2410                                   unsigned long addr,
2411                                   u32 err_code)
2412 {
2413         u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2414
2415         ++vcpu->stat.pf_guest;
2416
2417         if (is_page_fault(vect_info)) {
2418                 printk(KERN_DEBUG "inject_page_fault: "
2419                        "double fault 0x%lx @ 0x%lx\n",
2420                        addr, vmcs_readl(GUEST_RIP));
2421                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
2422                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2423                              DF_VECTOR |
2424                              INTR_TYPE_EXCEPTION |
2425                              INTR_INFO_DELIEVER_CODE_MASK |
2426                              INTR_INFO_VALID_MASK);
2427                 return;
2428         }
2429         vcpu->cr2 = addr;
2430         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
2431         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2432                      PF_VECTOR |
2433                      INTR_TYPE_EXCEPTION |
2434                      INTR_INFO_DELIEVER_CODE_MASK |
2435                      INTR_INFO_VALID_MASK);
2436
2437 }
2438
2439 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2440 {
2441         struct vcpu_vmx *vmx = to_vmx(vcpu);
2442
2443         if (vmx->vmcs) {
2444                 on_each_cpu(__vcpu_clear, vmx, 0, 1);
2445                 free_vmcs(vmx->vmcs);
2446                 vmx->vmcs = NULL;
2447         }
2448 }
2449
2450 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2451 {
2452         struct vcpu_vmx *vmx = to_vmx(vcpu);
2453
2454         vmx_free_vmcs(vcpu);
2455         kfree(vmx->host_msrs);
2456         kfree(vmx->guest_msrs);
2457         kvm_vcpu_uninit(vcpu);
2458         kmem_cache_free(kvm_vcpu_cache, vmx);
2459 }
2460
2461 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
2462 {
2463         int err;
2464         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
2465         int cpu;
2466
2467         if (!vmx)
2468                 return ERR_PTR(-ENOMEM);
2469
2470         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
2471         if (err)
2472                 goto free_vcpu;
2473
2474         if (irqchip_in_kernel(kvm)) {
2475                 err = kvm_create_lapic(&vmx->vcpu);
2476                 if (err < 0)
2477                         goto free_vcpu;
2478         }
2479
2480         /* set err here so any allocation failure below returns -ENOMEM */
2481         err = -ENOMEM;
2482         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2483         if (!vmx->guest_msrs)
2484                 goto uninit_vcpu;
2485
2486         vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2487         if (!vmx->host_msrs)
2488                 goto free_guest_msrs;
2489
2490         vmx->vmcs = alloc_vmcs();
2491         if (!vmx->vmcs)
2492                 goto free_msrs;
2493
2494         vmcs_clear(vmx->vmcs);
2495
2496         cpu = get_cpu();
2497         vmx_vcpu_load(&vmx->vcpu, cpu);
2498         err = vmx_vcpu_setup(vmx);
2499         vmx_vcpu_put(&vmx->vcpu);
2500         put_cpu();
2501         if (err)
2502                 goto free_vmcs;
2503
2504         return &vmx->vcpu;
2505
2506 free_vmcs:
2507         free_vmcs(vmx->vmcs);
2508 free_msrs:
2509         kfree(vmx->host_msrs);
2510 free_guest_msrs:
2511         kfree(vmx->guest_msrs);
2512 uninit_vcpu:
2513         kvm_vcpu_uninit(&vmx->vcpu);
2514 free_vcpu:
2515         kmem_cache_free(kvm_vcpu_cache, vmx);
2516         return ERR_PTR(err);
2517 }
2518
2519 static void __init vmx_check_processor_compat(void *rtn)
2520 {
2521         struct vmcs_config vmcs_conf;
2522
2523         *(int *)rtn = 0;
2524         if (setup_vmcs_config(&vmcs_conf) < 0)
2525                 *(int *)rtn = -EIO;
2526         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
2527                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
2528                                 smp_processor_id());
2529                 *(int *)rtn = -EIO;
2530         }
2531 }
2532
2533 static struct kvm_arch_ops vmx_arch_ops = {
2534         .cpu_has_kvm_support = cpu_has_kvm_support,
2535         .disabled_by_bios = vmx_disabled_by_bios,
2536         .hardware_setup = hardware_setup,
2537         .hardware_unsetup = hardware_unsetup,
2538         .check_processor_compatibility = vmx_check_processor_compat,
2539         .hardware_enable = hardware_enable,
2540         .hardware_disable = hardware_disable,
2541
2542         .vcpu_create = vmx_create_vcpu,
2543         .vcpu_free = vmx_free_vcpu,
2544
2545         .vcpu_load = vmx_vcpu_load,
2546         .vcpu_put = vmx_vcpu_put,
2547         .vcpu_decache = vmx_vcpu_decache,
2548
2549         .set_guest_debug = set_guest_debug,
2550         .get_msr = vmx_get_msr,
2551         .set_msr = vmx_set_msr,
2552         .get_segment_base = vmx_get_segment_base,
2553         .get_segment = vmx_get_segment,
2554         .set_segment = vmx_set_segment,
2555         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2556         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
2557         .set_cr0 = vmx_set_cr0,
2558         .set_cr3 = vmx_set_cr3,
2559         .set_cr4 = vmx_set_cr4,
2560 #ifdef CONFIG_X86_64
2561         .set_efer = vmx_set_efer,
2562 #endif
2563         .get_idt = vmx_get_idt,
2564         .set_idt = vmx_set_idt,
2565         .get_gdt = vmx_get_gdt,
2566         .set_gdt = vmx_set_gdt,
2567         .cache_regs = vcpu_load_rsp_rip,
2568         .decache_regs = vcpu_put_rsp_rip,
2569         .get_rflags = vmx_get_rflags,
2570         .set_rflags = vmx_set_rflags,
2571
2572         .tlb_flush = vmx_flush_tlb,
2573         .inject_page_fault = vmx_inject_page_fault,
2574
2575         .inject_gp = vmx_inject_gp,
2576
2577         .run = vmx_vcpu_run,
2578         .skip_emulated_instruction = skip_emulated_instruction,
2579         .patch_hypercall = vmx_patch_hypercall,
2580         .get_irq = vmx_get_irq,
2581         .set_irq = vmx_inject_irq,
2582 };
2583
2584 static int __init vmx_init(void)
2585 {
2586         void *iova;
2587         int r;
2588
2589         vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2590         if (!vmx_io_bitmap_a)
2591                 return -ENOMEM;
2592
2593         vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2594         if (!vmx_io_bitmap_b) {
2595                 r = -ENOMEM;
2596                 goto out;
2597         }
2598
2599         /* A set bit in the I/O bitmaps traps accesses to that port; trap
2600          * everything except the PC debug port, which is often used for I/O
2601          * delays (the vmexits would simply slow things down).
2602          */
2603         iova = kmap(vmx_io_bitmap_a);
2604         memset(iova, 0xff, PAGE_SIZE);
2605         clear_bit(0x80, iova);
2606         kunmap(vmx_io_bitmap_a);
2607
2608         iova = kmap(vmx_io_bitmap_b);
2609         memset(iova, 0xff, PAGE_SIZE);
2610         kunmap(vmx_io_bitmap_b);
2611
2612         r = kvm_init_arch(&vmx_arch_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
2613         if (r)
2614                 goto out1;
2615
2616         return 0;
2617
2618 out1:
2619         __free_page(vmx_io_bitmap_b);
2620 out:
2621         __free_page(vmx_io_bitmap_a);
2622         return r;
2623 }
2624
2625 static void __exit vmx_exit(void)
2626 {
2627         __free_page(vmx_io_bitmap_b);
2628         __free_page(vmx_io_bitmap_a);
2629
2630         kvm_exit_arch();
2631 }
2632
2633 module_init(vmx_init)
2634 module_exit(vmx_exit)