        load_transition_efer(vmx);
 }
 
-static void vmx_load_host_state(struct vcpu_vmx *vmx)
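+/* Must be called with preemption disabled. */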
+static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
        unsigned long flags;
 
        reload_host_efer(vmx);
 }
 
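+/*
+ * Host msr state may also be reloaded asynchronously when the vcpu
+ * thread is preempted with guest state loaded (vmx_vcpu_put), so a
+ * synchronous reload must keep preemption disabled to avoid racing it.
+ */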
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
+{
+       preempt_disable();
+       __vmx_load_host_state(vmx);
+       preempt_enable();
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
 
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
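+       /* Preemption is already disabled here (vcpu_put/preempt notifier). */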
-       vmx_load_host_state(to_vmx(vcpu));
+       __vmx_load_host_state(to_vmx(vcpu));
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
        switch (msr_index) {
 #ifdef CONFIG_X86_64
        case MSR_EFER:
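+               /*
+                * Switch back to the host msr state; the updated EFER is
+                * then picked up when guest state is next loaded.
+                */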
+               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
-               if (vmx->host_state.loaded) {
-                       reload_host_efer(vmx);
-                       load_transition_efer(vmx);
-               }
                break;
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
        case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
        default:
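+               /*
+                * Reload host state so the guest msrs are not live while
+                * the shadowed value is updated.
+                */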
+               vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
                        msr->data = data;
-                       if (vmx->host_state.loaded)
-                               load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);