Merge branches 'core/signal' and 'x86/spinlocks' into x86/xen
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 4038cbfe33319ac954b83437d45b9c189af052da..05794c566e8799382f9a314970661d5ab232e4a8 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
 /* Pseudo-flag used for virtual NMI, which we don't implement yet */
 #define XEN_EFLAGS_NMI 0x80000000
 
-#if 0
-#include <asm/percpu.h>
+#if 1
+/*
+       x86-64 does not yet support direct access to percpu variables
+       via a segment override, so we just need to make sure this code
+       never gets used
+ */
+#define BUG                    ud2a
+#define PER_CPU_VAR(var, off)  0xdeadbeef
+#endif
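
With PER_CPU_VAR() stubbed to a bare constant, any percpu reference left in
this file assembles to an absolute access of the poison address, and the BUG
(ud2a) now placed at the top of each ENTRY traps before such an access can be
reached.  A minimal illustration of the preprocessor expansion (not part of
the patch itself):

	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	/* ...is expanded by cpp into... */
	movb $0, 0xdeadbeef
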
 
 /*
        Enable events.  This clears the event mask and tests the pending
@@ -35,6 +42,8 @@
        events, then enter the hypervisor to get them handled.
  */
 ENTRY(xen_irq_enable_direct)
+       BUG
+
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
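
Only the unmask store is visible in this hunk; the rest of the fast path
tests the pending byte and calls check_events if anything is waiting.  A
sketch of the overall sequence, with the pending-byte offset name
(XEN_vcpu_info_pending) and the label layout assumed rather than copied from
this hunk:

	movb  $0,    PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)	/* unmask first */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)	/* then re-check */
	jz    1f							/* nothing pending */
	call  check_events						/* have Xen deliver it */
1:

Unmasking before the test closes the race window: once the mask byte is
clear, any event that arrives afterwards raises the upcall by itself, so only
events that were already pending need the explicit check_events call.
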
 
@@ -58,6 +67,8 @@ ENDPATCH(xen_irq_enable_direct)
        non-zero.
  */
 ENTRY(xen_irq_disable_direct)
+       BUG
+
        movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 ENDPATCH(xen_irq_disable_direct)
        ret
@@ -74,6 +85,8 @@ ENDPATCH(xen_irq_disable_direct)
        Xen and x86 use opposite senses (mask vs enable).
  */
 ENTRY(xen_save_fl_direct)
+       BUG
+
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
        setz %ah
        addb %ah,%ah
@@ -91,6 +104,8 @@ ENDPATCH(xen_save_fl_direct)
        if so.
  */
 ENTRY(xen_restore_fl_direct)
+       BUG
+
        testb $X86_EFLAGS_IF>>8, %ah
        setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
        /* Preempt here doesn't matter because that will deal with
@@ -122,7 +137,7 @@ check_events:
        push %r9
        push %r10
        push %r11
-       call force_evtchn_callback
+       call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
@@ -133,7 +148,6 @@ check_events:
        pop %rcx
        pop %rax
        ret
-#endif
 
 ENTRY(xen_adjust_exception_frame)
        mov 8+0(%rsp),%rcx
@@ -173,7 +187,7 @@ ENTRY(xen_sysexit)
        pushq $__USER32_CS
        pushq %rdx
 
-       pushq $VGCF_in_syscall
+       pushq $0
 1:     jmp hypercall_iret
 ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)