#define KVM_MAX_CPUID_ENTRIES 40
 
 #define DE_VECTOR 0
+#define UD_VECTOR 6
 #define NM_VECTOR 7
 #define DF_VECTOR 8
 #define TS_VECTOR 10
        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
-       gpa_t para_state_gpa;
-       struct page *para_state_page;
-       gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
-int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+
+int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
 static inline void kvm_guest_enter(void)
 {
 
 #include <linux/smp.h>
 #include <linux/anon_inodes.h>
 #include <linux/profile.h>
+#include <linux/kvm_para.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
-int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
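+/*
+ * Handle a guest hypercall: the number arrives in rax and up to four
+ * arguments in rbx, rcx, rdx, and rsi (truncated to 32 bits outside long
+ * mode); the return value is passed back in rax.
+ */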
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
-       unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+       unsigned long nr, a0, a1, a2, a3, ret;
 
        kvm_x86_ops->cache_regs(vcpu);
-       ret = -KVM_EINVAL;
-#ifdef CONFIG_X86_64
-       if (is_long_mode(vcpu)) {
-               nr = vcpu->regs[VCPU_REGS_RAX];
-               a0 = vcpu->regs[VCPU_REGS_RDI];
-               a1 = vcpu->regs[VCPU_REGS_RSI];
-               a2 = vcpu->regs[VCPU_REGS_RDX];
-               a3 = vcpu->regs[VCPU_REGS_RCX];
-               a4 = vcpu->regs[VCPU_REGS_R8];
-               a5 = vcpu->regs[VCPU_REGS_R9];
-       } else
-#endif
-       {
-               nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
-               a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
-               a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
-               a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
-               a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
-               a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
-               a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
+
+       nr = vcpu->regs[VCPU_REGS_RAX];
+       a0 = vcpu->regs[VCPU_REGS_RBX];
+       a1 = vcpu->regs[VCPU_REGS_RCX];
+       a2 = vcpu->regs[VCPU_REGS_RDX];
+       a3 = vcpu->regs[VCPU_REGS_RSI];
+
+       if (!is_long_mode(vcpu)) {
+               nr &= 0xFFFFFFFF;
+               a0 &= 0xFFFFFFFF;
+               a1 &= 0xFFFFFFFF;
+               a2 &= 0xFFFFFFFF;
+               a3 &= 0xFFFFFFFF;
        }
+
        switch (nr) {
        default:
-               run->hypercall.nr = nr;
-               run->hypercall.args[0] = a0;
-               run->hypercall.args[1] = a1;
-               run->hypercall.args[2] = a2;
-               run->hypercall.args[3] = a3;
-               run->hypercall.args[4] = a4;
-               run->hypercall.args[5] = a5;
-               run->hypercall.ret = ret;
-               run->hypercall.longmode = is_long_mode(vcpu);
-               kvm_x86_ops->decache_regs(vcpu);
-               return 0;
+               ret = -KVM_ENOSYS;
+               break;
        }
        vcpu->regs[VCPU_REGS_RAX] = ret;
        kvm_x86_ops->decache_regs(vcpu);
-       return 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+
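+/*
+ * Invoked from the instruction emulator when a guest's hypercall
+ * instruction is the wrong flavour for the host CPU and traps with #UD:
+ * patch the guest's bytes to the native vmcall/vmmcall encoding.
+ */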
+int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
+{
+       char instruction[3];
+       int ret = 0;
+
+       mutex_lock(&vcpu->kvm->lock);
+
+       /*
+        * Blow out the MMU so that no VCPU keeps an active mapping of the
+        * page being patched; this makes the rewritten hypercall instruction
+        * appear atomically across all VCPUs.
+        */
+       kvm_mmu_zap_all(vcpu->kvm);
+
+       kvm_x86_ops->cache_regs(vcpu);
+       kvm_x86_ops->patch_hypercall(vcpu, instruction);
+       if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
+           != X86EMUL_CONTINUE)
+               ret = -EFAULT;
+
+       mutex_unlock(&vcpu->kvm->lock);
+
+       return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_hypercall);
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 {
        }
 }
 
-/*
- * Register the para guest with the host:
- */
-static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
-{
-       struct kvm_vcpu_para_state *para_state;
-       hpa_t para_state_hpa, hypercall_hpa;
-       struct page *para_state_page;
-       unsigned char *hypercall;
-       gpa_t hypercall_gpa;
-
-       printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
-       printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
-
-       /*
-        * Needs to be page aligned:
-        */
-       if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
-               goto err_gp;
-
-       para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
-       printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
-       if (is_error_hpa(para_state_hpa))
-               goto err_gp;
-
-       mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
-       para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
-       para_state = kmap(para_state_page);
-
-       printk(KERN_DEBUG "....  guest version: %d\n", para_state->guest_version);
-       printk(KERN_DEBUG "....           size: %d\n", para_state->size);
-
-       para_state->host_version = KVM_PARA_API_VERSION;
-       /*
-        * We cannot support guests that try to register themselves
-        * with a newer API version than the host supports:
-        */
-       if (para_state->guest_version > KVM_PARA_API_VERSION) {
-               para_state->ret = -KVM_EINVAL;
-               goto err_kunmap_skip;
-       }
-
-       hypercall_gpa = para_state->hypercall_gpa;
-       hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
-       printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
-       if (is_error_hpa(hypercall_hpa)) {
-               para_state->ret = -KVM_EINVAL;
-               goto err_kunmap_skip;
-       }
-
-       printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
-       vcpu->para_state_page = para_state_page;
-       vcpu->para_state_gpa = para_state_gpa;
-       vcpu->hypercall_gpa = hypercall_gpa;
-
-       mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
-       hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
-                               KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
-       kvm_x86_ops->patch_hypercall(vcpu, hypercall);
-       kunmap_atomic(hypercall, KM_USER1);
-
-       para_state->ret = 0;
-err_kunmap_skip:
-       kunmap(para_state_page);
-       return 0;
-err_gp:
-       return 1;
-}
-
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
        case MSR_IA32_MISC_ENABLE:
                vcpu->ia32_misc_enable_msr = data;
                break;
-       /*
-        * This is the 'probe whether the host is KVM' logic:
-        */
-       case MSR_KVM_API_MAGIC:
-               return vcpu_register_para(vcpu, data);
-
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
                return 1;
 
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR7_MASK;
 
-       control->intercept_exceptions = 1 << PF_VECTOR;
+       control->intercept_exceptions = (1 << PF_VECTOR) |
+                                       (1 << UD_VECTOR);
 
 
        control->intercept =    (1ULL << INTERCEPT_INTR) |
        return 0;
 }
 
+static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+       int er;
+
+       er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0);
+       if (er != EMULATE_DONE)
+               inject_ud(&svm->vcpu);
+
+       return 1;
+}
+
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 {
        svm->next_rip = svm->vmcb->save.rip + 3;
        skip_emulated_instruction(&svm->vcpu);
-       return kvm_hypercall(&svm->vcpu, kvm_run);
+       kvm_emulate_hypercall(&svm->vcpu);
+       return 1;
 }
 
 static int invalid_op_interception(struct vcpu_svm *svm,
        [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
+       [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
        [SVM_EXIT_INTR]                         = nop_on_interception,
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
-       hypercall[3] = 0xc3;
 }
 
 static void svm_check_processor_compat(void *rtn)
 
                (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
+static inline int is_invalid_opcode(u32 intr_info)
+{
+       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+                            INTR_INFO_VALID_MASK)) ==
+               (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+}
+
 static inline int is_external_interrupt(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
 {
        u32 eb;
 
-       eb = 1u << PF_VECTOR;
+       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug.enabled)
                     INTR_INFO_VALID_MASK);
 }
 
+static void vmx_inject_ud(struct kvm_vcpu *vcpu)
+{
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+                    UD_VECTOR |
+                    INTR_TYPE_EXCEPTION |
+                    INTR_INFO_VALID_MASK);
+}
+
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
                return 1;
        }
 
+       if (is_invalid_opcode(intr_info)) {
+               er = emulate_instruction(vcpu, kvm_run, 0, 0);
+               if (er != EMULATE_DONE)
+                       vmx_inject_ud(vcpu);
+
+               return 1;
+       }
+
        error_code = 0;
        rip = vmcs_readl(GUEST_RIP);
        if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xc1;
-       hypercall[3] = 0xc3;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        skip_emulated_instruction(vcpu);
-       return kvm_hypercall(vcpu, kvm_run);
+       kvm_emulate_hypercall(vcpu);
+       return 1;
 }
 
 /*
 
                        if (modrm_mod != 3 || modrm_rm != 1)
                                goto cannot_emulate;
 
-                       /* nop */
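+                       /*
+                        * A vmcall that trapped here (e.g. on an AMD host)
+                        * is the wrong encoding for this CPU: patch in the
+                        * native instruction, then perform the hypercall.
+                        */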
+                       rc = kvm_fix_hypercall(ctxt->vcpu);
+                       if (rc)
+                               goto done;
+
+                       kvm_emulate_hypercall(ctxt->vcpu);
                        break;
                case 2: /* lgdt */
                        rc = read_descriptor(ctxt, ops, src.ptr,
                        break;
                case 3: /* lidt/vmmcall */
                        if (modrm_mod == 3 && modrm_rm == 1) {
-                               /* nop */
+                               rc = kvm_fix_hypercall(ctxt->vcpu);
+                               if (rc)
+                                       goto done;
+                               kvm_emulate_hypercall(ctxt->vcpu);
                        } else {
                                rc = read_descriptor(ctxt, ops, src.ptr,
                                                     &size, &address,
 
 #ifndef __LINUX_KVM_PARA_H
 #define __LINUX_KVM_PARA_H
 
-/*
- * Guest OS interface for KVM paravirtualization
- *
- * Note: this interface is totally experimental, and is certain to change
- *       as we make progress.
+/* This CPUID leaf returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.
+ * It should be used to determine whether a VM is running under KVM.
  */
+#define KVM_CPUID_SIGNATURE    0x40000000
 
-/*
- * Per-VCPU descriptor area shared between guest and host. Writable to
- * both guest and host. Registered with the host by the guest when
- * a guest acknowledges paravirtual mode.
- *
- * NOTE: all addresses are guest-physical addresses (gpa), to make it
- * easier for the hypervisor to map between the various addresses.
- */
-struct kvm_vcpu_para_state {
-       /*
-        * API version information for compatibility. If there's any support
-        * mismatch (too old host trying to execute too new guest) then
-        * the host will deny entry into paravirtual mode. Any other
-        * combination (new host + old guest and new host + new guest)
-        * is supposed to work - new host versions will support all old
-        * guest API versions.
-        */
-       u32 guest_version;
-       u32 host_version;
-       u32 size;
-       u32 ret;
-
-       /*
-        * The address of the vm exit instruction (VMCALL or VMMCALL),
-        * which the host will patch according to the CPU model the
-        * VM runs on:
-        */
-       u64 hypercall_gpa;
-
-} __attribute__ ((aligned(PAGE_SIZE)));
-
-#define KVM_PARA_API_VERSION 1
-
-/*
- * This is used for an RDMSR's ECX parameter to probe for a KVM host.
- * Hopefully no CPU vendor will use up this number. This is placed well
- * out of way of the typical space occupied by CPU vendors' MSR indices,
- * and we think (or at least hope) it wont be occupied in the future
- * either.
+/* This CPUID leaf returns a feature bitmap in eax.  Before relying on a
+ * particular paravirtual feature, check that its bit is set here.
  */
-#define MSR_KVM_API_MAGIC 0x87655678
+#define KVM_CPUID_FEATURES     0x40000001
 
-#define KVM_EINVAL 1
+/* Return values for hypercalls */
+#define KVM_ENOSYS             1000
 
-/*
- * Hypercall calling convention:
- *
- * Each hypercall may have 0-6 parameters.
- *
- * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1
- *
- * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention
- * order: RDI, RSI, RDX, RCX, R8, R9.
- *
- * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP.
- * (the first 3 are according to the gcc regparm calling convention)
+#ifdef __KERNEL__
+#include <linux/string.h>
+#include <asm/processor.h>
+
+/* This instruction is vmcall.  On CPUs without VT it traps with #UD, which
+ * the host catches, rewriting the bytes in place to the native instruction.
+ */
+#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+
+/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
+ * vmmcall instruction.  The hypervisor may replace it with something else,
+ * but only those two instructions are guaranteed to be supported.
  *
- * No registers are clobbered by the hypercall, except that the
- * return value is in RAX.
+ * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
+ * The hypercall number should be placed in rax; the return value is also
+ * delivered in rax.  No other registers will be clobbered unless explicitly
+ * noted by the particular hypercall.
  */
-#define __NR_hypercalls                        0
+
+static inline long kvm_hypercall0(unsigned int nr)
+{
+       long ret;
+       asm volatile(KVM_HYPERCALL
+                    : "=a"(ret)
+                    : "a"(nr));
+       return ret;
+}
+
+static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+       long ret;
+       asm volatile(KVM_HYPERCALL
+                    : "=a"(ret)
+                    : "a"(nr), "b"(p1));
+       return ret;
+}
+
+static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
+                                 unsigned long p2)
+{
+       long ret;
+       asm volatile(KVM_HYPERCALL
+                    : "=a"(ret)
+                    : "a"(nr), "b"(p1), "c"(p2));
+       return ret;
+}
+
+static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
+                                 unsigned long p2, unsigned long p3)
+{
+       long ret;
+       asm volatile(KVM_HYPERCALL
+                    : "=a"(ret)
+                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+       return ret;
+}
+
+static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+                                 unsigned long p2, unsigned long p3,
+                                 unsigned long p4)
+{
+       long ret;
+       asm volatile(KVM_HYPERCALL
+                    : "=a"(ret)
+                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+       return ret;
+}
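+
+/* A minimal usage sketch, illustrative only: no hypercall numbers are
+ * defined yet, so 'nr' and 'gpa' here are hypothetical and the host
+ * will answer -KVM_ENOSYS.
+ *
+ *	long ret = kvm_hypercall1(nr, gpa);
+ *	if (ret == -KVM_ENOSYS)
+ *		printk(KERN_INFO "hypercall %u unsupported\n", nr);
+ */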
+
+static inline int kvm_para_available(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       char signature[13];
+
+       cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+       memcpy(signature + 0, &ebx, 4);
+       memcpy(signature + 4, &ecx, 4);
+       memcpy(signature + 8, &edx, 4);
+       signature[12] = 0;
+
+       if (strcmp(signature, "KVMKVMKVM") == 0)
+               return 1;
+
+       return 0;
+}
+
+static inline int kvm_para_has_feature(unsigned int feature)
+{
+       if (cpuid_eax(KVM_CPUID_FEATURES) & (1UL << feature))
+               return 1;
+       return 0;
+}
+
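+/* Typical guest-side probe, sketched with placeholders: KVM_FEATURE_EXAMPLE
+ * and use_paravirt_interface() are hypothetical, since this patch defines
+ * no feature bits yet.
+ *
+ *	if (kvm_para_available() &&
+ *	    kvm_para_has_feature(KVM_FEATURE_EXAMPLE))
+ *		use_paravirt_interface();
+ */
+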
+#endif
 
 #endif