struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
- int fs_gs_ldt_reload_needed;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
}host_state;
};
/*
* Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = read_ldt();
- vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+ vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
vmx->host_state.fs_sel = read_fs();
- if (!(vmx->host_state.fs_sel & 7))
+ if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
- else {
+ vmx->host_state.fs_reload_needed = 0;
+ } else {
vmcs_write16(HOST_FS_SELECTOR, 0);
- vmx->host_state.fs_gs_ldt_reload_needed = 1;
+ vmx->host_state.fs_reload_needed = 1;
}
vmx->host_state.gs_sel = read_gs();
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
- vmx->host_state.fs_gs_ldt_reload_needed = 1;
+ vmx->host_state.gs_ldt_reload_needed = 1;
}
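
A note on the !(sel & 7) test used for both fs and gs above: the low three bits of a segment selector are exactly the fields the VMX host-state checks constrain, RPL in bits 1:0 and the table indicator (TI) in bit 2, and both must be zero in HOST_FS_SELECTOR/HOST_GS_SELECTOR. A standalone sketch (the helper name is mine, not from the patch):

        #include <stdbool.h>
        #include <stdint.h>

        /* A selector usable as VMX host state must have RPL == 0 and TI == 0. */
        static bool host_selector_ok(uint16_t sel)
        {
                uint16_t rpl = sel & 3;   /* bits 1:0, requested privilege level */
                bool ti = sel & 4;        /* bit 2, table indicator, 1 = LDT     */

                return rpl == 0 && !ti;   /* equivalent to !(sel & 7) */
        }

When the test fails, the patch parks 0 in the VMCS field and sets the matching *_reload_needed flag so the live selector is restored on the way back to the host.
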
#ifdef CONFIG_X86_64
        ...
#endif
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
        unsigned long flags;

        if (!vmx->host_state.loaded)
                return;

        vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_gs_ldt_reload_needed) {
- load_ldt(vmx->host_state.ldt_sel);
+ if (vmx->host_state.fs_reload_needed)
load_fs(vmx->host_state.fs_sel);
+ if (vmx->host_state.gs_ldt_reload_needed) {
+ load_ldt(vmx->host_state.ldt_sel);
/*
* If we have to reload gs, we must take care to
* preserve our gs base.
*/
local_irq_save(flags);
load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
local_irq_restore(flags);
-
- reload_tss();
}
+ reload_tss();
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
if (msr_efer_need_save_restore(vmx))
vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}
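
For context on the save_msrs()/load_msrs() calls above: in kvm of this vintage they are plain rdmsrl()/wrmsrl() loops over an index/data array. A sketch under that assumption (struct layout abbreviated, kernel context assumed):

        struct vmx_msr_entry {
                u32 index;
                u64 data;
        };

        static void save_msrs(struct vmx_msr_entry *e, int n)
        {
                int i;

                for (i = 0; i < n; ++i)
                        rdmsrl(e[i].index, e[i].data);
        }

        static void load_msrs(struct vmx_msr_entry *e, int n)
        {
                int i;

                for (i = 0; i < n; ++i)
                        wrmsrl(e[i].index, e[i].data);
        }
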
-static int rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm* kvm)
{
gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
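
The return-type change is not cosmetic: base_gfn << PAGE_SHIFT is an address, and with guest memory placed high enough the shifted value no longer fits in an int. A standalone illustration of the truncation the gva_t type avoids (types mimic kvm's, assumed 64-bit here):

        #include <stdint.h>
        #include <stdio.h>

        typedef uint64_t gfn_t;          /* guest frame number */
        typedef unsigned long gva_t;     /* 64-bit on x86_64   */
        #define PAGE_SHIFT 12

        int main(void)
        {
                gfn_t base_gfn = 0x100000;  /* a slot starting at 4GB */

                int as_int = base_gfn << PAGE_SHIFT;    /* wraps to 0        */
                gva_t as_gva = base_gfn << PAGE_SHIFT;  /* keeps 0x100000000 */

                printf("int: 0x%x  gva_t: 0x%lx\n", as_int, as_gva);
                return 0;
        }
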
find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- | VM_ENTRY_CONTROLS_IA32E_MASK);
+ | VM_ENTRY_IA32E_MODE);
}
static void exit_lmode(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- & ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ & ~VM_ENTRY_IA32E_MODE);
}
#endif
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
- VM_ENTRY_CONTROLS_IA32E_MASK);
+ VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) &
- ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ ~VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
return 0;
}
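
All three hunks above perform the same read-modify-write of VM_ENTRY_CONTROLS to toggle the renamed VM_ENTRY_IA32E_MODE bit (bit 9, "IA-32e mode guest", of the VM-entry controls per the Intel SDM). The pattern could be wrapped in helpers; a sketch with helper names of my own, not part of this patch:

        #define VM_ENTRY_IA32E_MODE 0x00000200  /* bit 9 of the VM-entry controls */

        static void vmcs_set_bits(unsigned long field, u32 mask)
        {
                vmcs_write32(field, vmcs_read32(field) | mask);
        }

        static void vmcs_clear_bits(unsigned long field, u32 mask)
        {
                vmcs_write32(field, vmcs_read32(field) & ~mask);
        }

        /* e.g. entering long mode:
         *   vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_IA32E_MODE);
         */
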
-static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
-{
- u64 inst;
- gva_t rip;
- int countr_size;
- int i;
-
- if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
- countr_size = 2;
- } else {
- u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
-
- countr_size = (cs_ar & AR_L_MASK) ? 8:
- (cs_ar & AR_DB_MASK) ? 4: 2;
- }
-
- rip = vmcs_readl(GUEST_RIP);
- if (countr_size != 8)
- rip += vmcs_readl(GUEST_CS_BASE);
-
- if (emulator_read_std(rip, &inst, sizeof(inst), vcpu) !=
- X86EMUL_CONTINUE)
- return 0;
-
- for (i = 0; i < sizeof(inst); i++) {
- switch (((u8*)&inst)[i]) {
- case 0xf0:
- case 0xf2:
- case 0xf3:
- case 0x2e:
- case 0x36:
- case 0x3e:
- case 0x26:
- case 0x64:
- case 0x65:
- case 0x66:
- break;
- case 0x67:
- countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
- default:
- goto done;
- }
- }
- return 0;
-done:
- countr_size *= 8;
- *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
- //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
- return 1;
-}
-
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u64 exit_qualification;
int size, down, in, string, rep;
unsigned port;
- unsigned long count;
- gva_t address;
++vcpu->stat.io_exits;
exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
- in = (exit_qualification & 8) != 0;
- size = (exit_qualification & 7) + 1;
string = (exit_qualification & 16) != 0;
+
+ if (string) {
+ if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+ return 0;
+ return 1;
+ }
+
+ size = (exit_qualification & 7) + 1;
+ in = (exit_qualification & 8) != 0;
down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
- count = 1;
rep = (exit_qualification & 32) != 0;
port = exit_qualification >> 16;
- address = 0;
- if (string) {
- if (rep && !get_io_count(vcpu, &count))
- return 1;
- address = vmcs_readl(GUEST_LINEAR_ADDRESS);
- }
- return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
- address, rep, port);
+
+ return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
}
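
The rewritten handler leans on the architected EXIT_QUALIFICATION layout for I/O-instruction exits: bits 2:0 are the access size minus one, bit 3 the direction (1 = IN), bit 4 the string flag, bit 5 the REP flag, and bits 31:16 the port number. A self-contained decoder sketch (struct and names are mine):

        typedef unsigned long long u64;

        struct io_exit {
                int size;       /* access width: 1, 2 or 4 bytes */
                int in;         /* 1 = IN, 0 = OUT               */
                int string;     /* INS/OUTS                      */
                int rep;        /* REP prefixed                  */
                unsigned port;  /* I/O port number               */
        };

        static struct io_exit decode_io_exit(u64 qual)
        {
                struct io_exit e;

                e.size   = (qual & 7) + 1;
                e.in     = (qual >> 3) & 1;
                e.string = (qual >> 4) & 1;
                e.rep    = (qual >> 5) & 1;
                e.port   = (qual >> 16) & 0xffff;
                return e;
        }
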
vcpu_load_rsp_rip(vcpu);
set_cr8(vcpu, vcpu->regs[reg]);
skip_emulated_instruction(vcpu);
- return 1;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+ return 0;
};
break;
case 2: /* clts */
preempt_disable();
- if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
-
vmx_save_host_state(vmx);
kvm_load_guest_fpu(vcpu);
local_irq_disable();
+ if (signal_pending(current)) {
+ local_irq_enable();
+ preempt_enable();
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.signal_exits;
+ goto out;
+ }
+
+ if (!vcpu->mmio_read_completed)
+ do_interrupt_requests(vcpu, kvm_run);
+
vcpu->guest_mode = 1;
if (vcpu->requests)
if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
r = kvm_handle_exit(kvm_run, vcpu);
if (r > 0) {
- /* Give scheduler a change to reschedule. */
- if (signal_pending(current)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.signal_exits;
- goto out;
- }
-
if (dm_request_for_irq_injection(vcpu, kvm_run)) {
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR;
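
On the relocated signal check: doing it after local_irq_disable() and before guest entry means a signal made pending by another CPU's IPI cannot slip in unnoticed between the check and the world switch; either the check sees it here, or the interrupt stays pending and forces a VM exit once the guest is entered. Reduced to a sketch (names illustrative, kernel context assumed):

        static int enter_guest_or_bail(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        {
                local_irq_disable();

                if (signal_pending(current)) {
                        /* Bail out before committing to the world switch. */
                        local_irq_enable();
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        return -EINTR;
                }

                vcpu->guest_mode = 1;
                /* ... VMLAUNCH/VMRESUME runs here with irqs still disabled ... */
                return 0;
        }
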
kfree(vmx->host_msrs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
- kfree(vmx);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
}
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
- kfree(vmx);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
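
The kfree() to kmem_cache_free() conversions only hold up if the object came from the matching cache, so alloc and free must pair on the same kmem_cache. A sketch of the allocation side this implies (assuming a cache created once at module init, analogous to kvm's kvm_vcpu_cache):

        static struct kmem_cache *kvm_vcpu_cache;

        static int __init example_module_init(void)
        {
                /* One slab cache for all vcpu objects, sized for the vmx variant. */
                kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", sizeof(struct vcpu_vmx),
                                                   __alignof__(struct vcpu_vmx),
                                                   0, NULL);
                if (!kvm_vcpu_cache)
                        return -ENOMEM;
                return 0;
        }

        /* allocation in vmx_create_vcpu would then pair with the frees above:
         *   struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
         */
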