#include "kvm_svm.h"
 #include "irq.h"
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
                printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
-       if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
-               printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-                      __func__,
-                      svm->vmcb->save.rip,
-                      svm->next_rip);
+       if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
+               printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
+                      __func__, kvm_rip_read(vcpu), svm->next_rip);
 
-       vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
+       kvm_rip_write(vcpu, svm->next_rip);
        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
        vcpu->arch.interrupt_window_open = 1;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
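+       /* Keep the register cache coherent with the architectural reset rip. */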
+       svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
        /*
         * cr0 val on cpu init should be 0x60000010, we enable cpu
        init_vmcb(svm);
 
        if (vcpu->vcpu_id != 0) {
-               svm->vmcb->save.rip = 0;
+               kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
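+       /*
+        * svm_vcpu_run() keeps vcpu->arch.regs[] valid at all times, so
+        * every register is available, and all of them need a write-back.
+        */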
+       vcpu->arch.regs_avail = ~0;
+       vcpu->arch.regs_dirty = ~0;
 
        return 0;
 }
        rdtscll(vcpu->arch.host_tsc);
 }
 
-static void svm_cache_regs(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-
-       vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-       vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-       vcpu->arch.rip = svm->vmcb->save.rip;
-}
-
-static void svm_decache_regs(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-       svm->vmcb->save.rip = vcpu->arch.rip;
-}
-
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
        return to_svm(vcpu)->vmcb->save.rflags;
 
 static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       svm->next_rip = svm->vmcb->save.rip + 1;
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       svm->next_rip = svm->vmcb->save.rip + 3;
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       svm->next_rip = svm->vmcb->save.rip + 2;
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
        kvm_emulate_cpuid(&svm->vcpu);
        return 1;
 }
                KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
                            (u32)(data >> 32), handler);
 
-               svm->vmcb->save.rax = data & 0xffffffff;
+               svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
                svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
-               svm->next_rip = svm->vmcb->save.rip + 2;
+               svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
                skip_emulated_instruction(&svm->vcpu);
        }
        return 1;
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
-       u64 data = (svm->vmcb->save.rax & -1u)
+       u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
                | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
        KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
                    handler);
 
-       svm->next_rip = svm->vmcb->save.rip + 2;
+       svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
        if (svm_set_msr(&svm->vcpu, ecx, data))
                kvm_inject_gp(&svm->vcpu, 0);
        else
        u16 gs_selector;
        u16 ldt_selector;
 
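+       /*
+        * rax, rsp and rip are switched through the VMCB on VMRUN/#VMEXIT;
+        * seed them from the register cache before entering the guest.
+        */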
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
        pre_svm_run(svm);
 
        sync_lapic_to_cr8(vcpu);
                load_db_regs(svm->host_db_regs);
 
        vcpu->arch.cr2 = svm->vmcb->save.cr2;
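+       /* Recover the values the guest left in the VMCB on #VMEXIT. */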
+       vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+       vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+       vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
        write_dr6(svm->host_dr6);
        write_dr7(svm->host_dr7);
        .set_gdt = svm_set_gdt,
        .get_dr = svm_get_dr,
        .set_dr = svm_set_dr,
-       .cache_regs = svm_cache_regs,
-       .decache_regs = svm_decache_regs,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
 
 
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include "kvm_cache_regs.h"
 
 #include <asm/io.h>
 #include <asm/desc.h>
        unsigned long rip;
        u32 interruptibility;
 
-       rip = vmcs_readl(GUEST_RIP);
+       rip = kvm_rip_read(vcpu);
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-       vmcs_writel(GUEST_RIP, rip);
+       kvm_rip_write(vcpu, rip);
 
        /*
         * We emulated an instruction, so temporary interrupt blocking
        return ret;
 }
 
-/*
- * Sync the rsp and rip registers into the vcpu structure.  This allows
- * registers to be accessed by indexing vcpu->arch.regs.
- */
-static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
-       vcpu->arch.rip = vmcs_readl(GUEST_RIP);
-}
-
-/*
- * Syncs rsp and rip back into the vmcs.  Should be called after possible
- * modification.
- */
-static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
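+/*
+ * Sync a single register from the VMCS into vcpu->arch.regs[] and mark
+ * it available.  Only RSP and RIP are fetched lazily; the other general
+ * purpose registers are saved and restored by the vmx_vcpu_run() asm and
+ * are always valid in the cache.
+ */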
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
-       vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-       vmcs_writel(GUEST_RIP, vcpu->arch.rip);
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+       switch (reg) {
+       case VCPU_REGS_RSP:
+               vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+               break;
+       case VCPU_REGS_RIP:
+               vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+               break;
+       default:
+               break;
+       }
 }
 
 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
        u64 msr;
        int ret;
 
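+       /* Only RSP and RIP have to be refetched from the VMCS. */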
+       vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
        down_read(&vcpu->kvm->slots_lock);
        if (!init_rmode(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
 
        vmcs_writel(GUEST_RFLAGS, 0x02);
        if (vmx->vcpu.vcpu_id == 0)
-               vmcs_writel(GUEST_RIP, 0xfff0);
+               kvm_rip_write(vcpu, 0xfff0);
        else
-               vmcs_writel(GUEST_RIP, 0);
-       vmcs_writel(GUEST_RSP, 0);
+               kvm_rip_write(vcpu, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
        /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
        vmcs_writel(GUEST_DR7, 0x400);
        if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
                vmx->rmode.irq.vector = irq;
-               vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
+               vmx->rmode.irq.rip = kvm_rip_read(vcpu);
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                             irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
-               vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1);
+               kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
        }
 
        error_code = 0;
-       rip = vmcs_readl(GUEST_RIP);
+       rip = kvm_rip_read(vcpu);
        if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
-               KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
-                           (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
+               KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
+                           (u32)kvm_register_read(vcpu, reg),
+                           (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
+                           handler);
                switch (cr) {
                case 0:
-                       vcpu_load_rsp_rip(vcpu);
-                       kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 3:
-                       vcpu_load_rsp_rip(vcpu);
-                       kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 4:
-                       vcpu_load_rsp_rip(vcpu);
-                       kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
-                       vcpu_load_rsp_rip(vcpu);
-                       kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
                        skip_emulated_instruction(vcpu);
                        if (irqchip_in_kernel(vcpu->kvm))
                                return 1;
                };
                break;
        case 2: /* clts */
-               vcpu_load_rsp_rip(vcpu);
                vmx_fpu_deactivate(vcpu);
                vcpu->arch.cr0 &= ~X86_CR0_TS;
                vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
        case 1: /*mov from cr*/
                switch (cr) {
                case 3:
-                       vcpu_load_rsp_rip(vcpu);
-                       vcpu->arch.regs[reg] = vcpu->arch.cr3;
-                       vcpu_put_rsp_rip(vcpu);
+                       kvm_register_write(vcpu, reg, vcpu->arch.cr3);
                        KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-                                   (u32)vcpu->arch.regs[reg],
-                                   (u32)((u64)vcpu->arch.regs[reg] >> 32),
+                                   (u32)kvm_register_read(vcpu, reg),
+                                   (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
                                    handler);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
-                       vcpu_load_rsp_rip(vcpu);
-                       vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
-                       vcpu_put_rsp_rip(vcpu);
+                       kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
                        KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-                                   (u32)vcpu->arch.regs[reg], handler);
+                                   (u32)kvm_register_read(vcpu, reg), handler);
                        skip_emulated_instruction(vcpu);
                        return 1;
                }
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        dr = exit_qualification & 7;
        reg = (exit_qualification >> 8) & 15;
-       vcpu_load_rsp_rip(vcpu);
        if (exit_qualification & 16) {
                /* mov from dr */
                switch (dr) {
                default:
                        val = 0;
                }
-               vcpu->arch.regs[reg] = val;
+               kvm_register_write(vcpu, reg, val);
                KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
        } else {
                /* mov to dr */
        }
-       vcpu_put_rsp_rip(vcpu);
        skip_emulated_instruction(vcpu);
        return 1;
 }
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 vectoring_info = vmx->idt_vectoring_info;
 
-       KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
-                   (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+       KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
+                   (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
 
        /* Access CR3 don't cause VMExit in paging mode, so we need
         * to sync with guest real CR3. */
 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 {
        vmx->rmode.irq.pending = 0;
-       if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip)
+       if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
                return;
-       vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip);
+       kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
        if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
                vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
                vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info;
 
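+       /* Flush registers dirtied since the last exit back to the VMCS. */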
+       if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
        /*
         * Loading guest fpu may have cleared host cr0.ts
         */
 #endif
              );
 
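+       /*
+        * The asm above saved every guest GPR except RSP and RIP, which
+        * stay in the VMCS until fetched through vmx_cache_reg().
+        */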
+       vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+       vcpu->arch.regs_dirty = 0;
+
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
                fixup_rmode_irq(vmx);
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
-       .cache_regs = vcpu_load_rsp_rip,
-       .decache_regs = vcpu_put_rsp_rip,
+       .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
 
 
 #include "mmu.h"
 #include "i8254.h"
 #include "tss.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/clocksource.h>
 #include <linux/kvm.h>
                                    struct kvm_cpuid_entry2 __user *entries);
 
 struct kvm_x86_ops *kvm_x86_ops;
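+/*
+ * The inline accessors in kvm_cache_regs.h dereference kvm_x86_ops from
+ * the kvm-intel/kvm-amd modules, so the symbol has to be exported now.
+ */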
+EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
        u8 opcodes[4];
-       unsigned long rip = vcpu->arch.rip;
+       unsigned long rip = kvm_rip_read(vcpu);
        unsigned long rip_linear;
 
        if (!printk_ratelimit())
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
 };
 
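+/*
+ * Force every general purpose register into vcpu->arch.regs[].  Reading
+ * RAX, RSP and RIP is sufficient: RSP and RIP are the only registers the
+ * VMX ->cache_reg() callback defers; RAX is read as well, presumably to
+ * stay correct should an implementation ever cache it lazily (it lives
+ * in the VMCB on SVM).  Marking everything dirty forces a full
+ * write-back before the next VM entry.
+ */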
+static void cache_all_regs(struct kvm_vcpu *vcpu)
+{
+       kvm_register_read(vcpu, VCPU_REGS_RAX);
+       kvm_register_read(vcpu, VCPU_REGS_RSP);
+       kvm_register_read(vcpu, VCPU_REGS_RIP);
+       vcpu->arch.regs_dirty = ~0;
+}
+
 int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
        struct decode_cache *c;
 
        vcpu->arch.mmio_fault_cr2 = cr2;
-       kvm_x86_ops->cache_regs(vcpu);
+       /*
+        * TODO: fix x86_emulate.c to use kvm_register_read/write
+        * instead of direct ->regs accesses; this can save hundreds
+        * of cycles on Intel for instructions that don't read/change
+        * RSP, for example.
+        */
+       cache_all_regs(vcpu);
 
        vcpu->mmio_is_write = 0;
        vcpu->arch.pio.string = 0;
                return EMULATE_DO_MMIO;
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
        if (vcpu->mmio_is_write) {
        struct kvm_pio_request *io = &vcpu->arch.pio;
        long delta;
        int r;
-
-       kvm_x86_ops->cache_regs(vcpu);
+       unsigned long val;
 
        if (!io->string) {
-               if (io->in)
-                       memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
-                              io->size);
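+               /*
+                * An IN modifies only io->size bytes of RAX; read, merge
+                * and write back to preserve the upper bytes.
+                */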
+               if (io->in) {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+                       memcpy(&val, vcpu->arch.pio_data, io->size);
+                       kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+               }
        } else {
                if (io->in) {
                        r = pio_copy_data(vcpu);
-                       if (r) {
-                               kvm_x86_ops->cache_regs(vcpu);
+                       if (r)
                                return r;
-                       }
                }
 
                delta = 1;
                         * The size of the register should really depend on
                         * current address size.
                         */
-                       vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
+                       val = kvm_register_read(vcpu, VCPU_REGS_RCX);
+                       val -= delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RCX, val);
                }
                if (io->down)
                        delta = -delta;
                delta *= io->size;
-               if (io->in)
-                       vcpu->arch.regs[VCPU_REGS_RDI] += delta;
-               else
-                       vcpu->arch.regs[VCPU_REGS_RSI] += delta;
+               if (io->in) {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RDI);
+                       val += delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RDI, val);
+               } else {
+                       val = kvm_register_read(vcpu, VCPU_REGS_RSI);
+                       val += delta;
+                       kvm_register_write(vcpu, VCPU_REGS_RSI, val);
+               }
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
-
        io->count -= io->cur_count;
        io->cur_count = 0;
 
                  int size, unsigned port)
 {
        struct kvm_io_device *pio_dev;
+       unsigned long val;
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
                KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
                            handler);
 
-       kvm_x86_ops->cache_regs(vcpu);
-       memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       memcpy(vcpu->arch.pio_data, &val, 4);
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        unsigned long nr, a0, a1, a2, a3, ret;
        int r = 1;
 
-       kvm_x86_ops->cache_regs(vcpu);
-
-       nr = vcpu->arch.regs[VCPU_REGS_RAX];
-       a0 = vcpu->arch.regs[VCPU_REGS_RBX];
-       a1 = vcpu->arch.regs[VCPU_REGS_RCX];
-       a2 = vcpu->arch.regs[VCPU_REGS_RDX];
-       a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+       nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
        KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
 
                ret = -KVM_ENOSYS;
                break;
        }
-       vcpu->arch.regs[VCPU_REGS_RAX] = ret;
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        ++vcpu->stat.hypercalls;
        return r;
 }
 {
        char instruction[3];
        int ret = 0;
+       unsigned long rip = kvm_rip_read(vcpu);
 
 
        /*
         */
        kvm_mmu_zap_all(vcpu->kvm);
 
-       kvm_x86_ops->cache_regs(vcpu);
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
-       if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
+       if (emulator_write_emulated(rip, instruction, 3, vcpu)
            != X86EMUL_CONTINUE)
                ret = -EFAULT;
 
        u32 function, index;
        struct kvm_cpuid_entry2 *e, *best;
 
-       kvm_x86_ops->cache_regs(vcpu);
-       function = vcpu->arch.regs[VCPU_REGS_RAX];
-       index = vcpu->arch.regs[VCPU_REGS_RCX];
-       vcpu->arch.regs[VCPU_REGS_RAX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RBX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RCX] = 0;
-       vcpu->arch.regs[VCPU_REGS_RDX] = 0;
+       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
        best = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                                best = e;
        }
        if (best) {
-               vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
-               vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
-               vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
-               vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
+               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
        }
-       kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        KVMTRACE_5D(CPUID, vcpu, function,
-                   (u32)vcpu->arch.regs[VCPU_REGS_RAX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RBX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RCX],
-                   (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
+                   (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING)) {
-               kvm_x86_ops->cache_regs(vcpu);
-               profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
+               unsigned long rip = kvm_rip_read(vcpu);
+               profile_hit(KVM_PROFILING, (void *)rip);
        }
 
        if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
                }
        }
 #endif
-       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-               kvm_x86_ops->cache_regs(vcpu);
-               vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-               kvm_x86_ops->decache_regs(vcpu);
-       }
+       if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
+               kvm_register_write(vcpu, VCPU_REGS_RAX,
+                                    kvm_run->hypercall.ret);
 
        r = __vcpu_run(vcpu, kvm_run);
 
 {
        vcpu_load(vcpu);
 
-       kvm_x86_ops->cache_regs(vcpu);
-
-       regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
-       regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
-       regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
-       regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
-       regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
-       regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
-       regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-       regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
+       regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
+       regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
 #ifdef CONFIG_X86_64
-       regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
-       regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
-       regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
-       regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
-       regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
-       regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
-       regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
-       regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
+       regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
+       regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
+       regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
+       regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
+       regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
+       regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
+       regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
+       regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
 #endif
 
-       regs->rip = vcpu->arch.rip;
+       regs->rip = kvm_rip_read(vcpu);
        regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
        /*
 {
        vcpu_load(vcpu);
 
-       vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
-       vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
-       vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
-       vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
-       vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
-       vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
 #ifdef CONFIG_X86_64
-       vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
-       vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
-       vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
-       vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
-       vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
-       vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
-       vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
-       vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
+       kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
+       kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
+       kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
+       kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
+       kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
+       kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
+       kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
+       kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
 #endif
 
-       vcpu->arch.rip = regs->rip;
+       kvm_rip_write(vcpu, regs->rip);
        kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-       kvm_x86_ops->decache_regs(vcpu);
-
        vcpu->arch.exception.pending = false;
 
                                struct tss_segment_32 *tss)
 {
        tss->cr3 = vcpu->arch.cr3;
-       tss->eip = vcpu->arch.rip;
+       tss->eip = kvm_rip_read(vcpu);
        tss->eflags = kvm_x86_ops->get_rflags(vcpu);
-       tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
-       tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-       tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
-       tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
-       tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
-       tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
-       tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
-       tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
-
+       tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
 {
        kvm_set_cr3(vcpu, tss->cr3);
 
-       vcpu->arch.rip = tss->eip;
+       kvm_rip_write(vcpu, tss->eip);
        kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
 
-       vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
-       vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
-       vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
-       vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
-       vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
-       vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
        if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;
 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
                                struct tss_segment_16 *tss)
 {
-       tss->ip = vcpu->arch.rip;
+       tss->ip = kvm_rip_read(vcpu);
        tss->flag = kvm_x86_ops->get_rflags(vcpu);
-       tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
-       tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
-       tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
-       tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
-       tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
-       tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
-       tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
-       tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+       tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
+       tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
+       tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+       tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
 
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
                                 struct tss_segment_16 *tss)
 {
-       vcpu->arch.rip = tss->ip;
+       kvm_rip_write(vcpu, tss->ip);
        kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
-       vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
-       vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
-       vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
-       vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
-       vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
-       vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
-       vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
-       vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+       kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
+       kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
+       kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
+       kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
+       kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
        if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;
        }
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
-       kvm_x86_ops->cache_regs(vcpu);
 
        if (nseg_desc.type & 8)
                ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
-       kvm_x86_ops->decache_regs(vcpu);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);