diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index 86e68e680116c1950b4e4ffbfc1c83324b4fc8ba..f0c37511d8da82d6e30650a6d7f0cc6cbd6dd368 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/preempt.h>
+#include <linux/hardirq.h>
 #include <linux/percpu.h>
 #include <linux/delay.h>
 #include <linux/start_kernel.h>
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
+#include <linux/smp.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
+#include <xen/interface/sched.h>
 #include <xen/features.h>
 #include <xen/page.h>
 
@@ -40,6 +43,8 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/reboot.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -56,9 +61,64 @@ DEFINE_PER_CPU(unsigned long, xen_cr3);
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
-static void xen_vcpu_setup(int cpu)
+static /* __initdata */ struct shared_info dummy_shared_info;
+
+/*
+ * Point at some empty memory to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+
+/*
+ * Flag to determine whether vcpu info placement is available on all
+ * VCPUs.  We assume it is to start with, and then set it to zero on
+ * the first failure.  This is because it can succeed on some VCPUs
+ * and not others, since it can involve hypervisor memory allocation,
+ * or because the guest failed to guarantee all the appropriate
+ * constraints on all VCPUs (i.e. the buffer can't cross a page boundary).
+ *
+ * Note that any particular CPU may be using a placed vcpu structure,
+ * but we can only optimise if they all are.
+ *
+ * 0: not available, 1: available
+ */
+static int have_vcpu_info_placement = 1;
+
+static void __init xen_vcpu_setup(int cpu)
 {
+       struct vcpu_register_vcpu_info info;
+       int err;
+       struct vcpu_info *vcpup;
+
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+       if (!have_vcpu_info_placement)
+               return;         /* already tested, not available */
+
+       vcpup = &per_cpu(xen_vcpu_info, cpu);
+
+       info.mfn = virt_to_mfn(vcpup);
+       info.offset = offset_in_page(vcpup);
+
+       printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n",
+              cpu, vcpup, info.mfn, info.offset);
+
+       /* Check to see if the hypervisor will put the vcpu_info
+          structure where we want it, which allows direct access via
+          a percpu variable. */
+       err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+
+       if (err) {
+               printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
+               have_vcpu_info_placement = 0;
+       } else {
+               /* This cpu is using the registered vcpu info, even if
+                  later ones fail to. */
+               per_cpu(xen_vcpu, cpu) = vcpup;
+
+               printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
+                      cpu, vcpup);
+       }
 }
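
The payoff of successful placement is that the hot irq-flag paths can reach
vcpu_info as an ordinary percpu variable instead of chasing the
HYPERVISOR_shared_info pointer.  A minimal C sketch of the idea, purely
illustrative: the xen_save_fl_direct and friends wired up later in this
patch are implemented in assembly so that xen_patch() can inline them.

	/* Illustrative sketch, not part of the patch: with vcpu_info in
	   percpu memory, saving the flag state collapses to one percpu
	   read. */
	static unsigned long xen_save_fl_direct_sketch(void)
	{
		/* evtchn_upcall_mask has the opposite sense of X86_EFLAGS_IF */
		return x86_read_percpu(xen_vcpu_info.evtchn_upcall_mask)
			? 0 : X86_EFLAGS_IF;
	}
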
 
 static void __init xen_banner(void)
@@ -106,11 +166,10 @@ static unsigned long xen_save_fl(void)
        struct vcpu_info *vcpu;
        unsigned long flags;
 
-       preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
+
        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;
-       preempt_enable();
 
        /* convert to IF type flag
           -0 -> 0x00000000
@@ -123,32 +182,35 @@ static void xen_restore_fl(unsigned long flags)
 {
        struct vcpu_info *vcpu;
 
-       preempt_disable();
-
        /* convert from IF type flag */
        flags = !(flags & X86_EFLAGS_IF);
+
+       /* There's a one-instruction preempt window here.  We need to
+          make sure we don't switch CPUs between getting the vcpu
+          pointer and updating the mask. */
+       preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;
+       preempt_enable_no_resched();
 
-       if (flags == 0) {
-               /* Unmask then check (avoid races).  We're only protecting
-                  against updates by this CPU, so there's no need for
-                  anything stronger. */
-               barrier();
+       /* Doesn't matter if we get preempted here, because any
+          pending event will get dealt with anyway. */
 
+       if (flags == 0) {
+               preempt_check_resched();
+               barrier(); /* unmask then check (avoid races) */
                if (unlikely(vcpu->evtchn_upcall_pending))
                        force_evtchn_callback();
-               preempt_enable();
-       } else
-               preempt_enable_no_resched();
+       }
 }
 
 static void xen_irq_disable(void)
 {
-       struct vcpu_info *vcpu;
+       /* There's a one-instruction preempt window here.  We need to
+          make sure we don't switch CPUs between getting the vcpu
+          pointer and updating the mask. */
        preempt_disable();
-       vcpu = x86_read_percpu(xen_vcpu);
-       vcpu->evtchn_upcall_mask = 1;
+       x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
 }
 
@@ -156,18 +218,20 @@ static void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
+       /* There's a one-instruction preempt window here.  We need to
+          make sure we don't switch CPUs between getting the vcpu
+          pointer and updating the mask. */
        preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
+       preempt_enable_no_resched();
 
-       /* Unmask then check (avoid races).  We're only protecting
-          against updates by this CPU, so there's no need for
-          anything stronger. */
-       barrier();
+       /* Doesn't matter if we get preempted here, because any
+          pending event will get dealt with anyway. */
 
+       barrier(); /* unmask then check (avoid races) */
        if (unlikely(vcpu->evtchn_upcall_pending))
                force_evtchn_callback();
-       preempt_enable();
 }
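
The unmask-then-check ordering above is load-bearing; an annotation (not
patch content) of the interleaving it guards against:

	/*
	 * vcpu->evtchn_upcall_mask = 0;       unmask first
	 * barrier();                          compiler ordering suffices,
	 *                                     we only race with this CPU
	 * if (vcpu->evtchn_upcall_pending)    an event that arrived while
	 *         force_evtchn_callback();    masked is still noticed
	 *
	 * Checking before unmasking could miss an event delivered between
	 * the check and the unmask, leaving it pending until something
	 * else triggers a callback.
	 */
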
 
 static void xen_safe_halt(void)
@@ -187,6 +251,8 @@ static void xen_halt(void)
 
 static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
 {
+       BUG_ON(preemptible());
+
        switch (mode) {
        case PARAVIRT_LAZY_NONE:
                BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
@@ -282,6 +348,18 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
        load_TLS_descriptor(t, cpu, 2);
 
        xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+       /*
+        * XXX sleazy hack: If we're being called in a lazy-cpu zone,
+        * it means we're in a context switch, and %gs has just been
+        * saved.  This means we can zero it out to prevent faults on
+        * exit from the hypervisor if the next process has no %gs.
+        * Either way, it has been saved, and the new value will get
+        * loaded properly.  This will go away as soon as Xen has been
+        * modified to not save/restore %gs for normal hypercalls.
+        */
+       if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+               loadsegment(gs, 0);
 }
 
 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
@@ -291,9 +369,13 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
        xmaddr_t mach_lp = virt_to_machine(lp);
        u64 entry = (u64)high << 32 | low;
 
+       preempt_disable();
+
        xen_mc_flush();
        if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
                BUG();
+
+       preempt_enable();
 }
 
 static int cvt_gate_to_trap(int vector, u32 low, u32 high,
@@ -326,11 +408,13 @@ static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
 static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
                                u32 low, u32 high)
 {
-
-       int cpu = smp_processor_id();
        unsigned long p = (unsigned long)&dt[entrynum];
-       unsigned long start = per_cpu(idt_desc, cpu).address;
-       unsigned long end = start + per_cpu(idt_desc, cpu).size + 1;
+       unsigned long start, end;
+
+       preempt_disable();
+
+       start = __get_cpu_var(idt_desc).address;
+       end = start + __get_cpu_var(idt_desc).size + 1;
 
        xen_mc_flush();
 
@@ -345,25 +429,18 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }
+
+       preempt_enable();
 }
 
-/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
-   hold a spinlock to protect the static traps[] array (static because
-   it avoids allocation, and saves stack space). */
-static void xen_load_idt(const struct Xgt_desc_struct *desc)
+static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+                                 struct trap_info *traps)
 {
-       static DEFINE_SPINLOCK(lock);
-       static struct trap_info traps[257];
-
-       int cpu = smp_processor_id();
        unsigned in, out, count;
 
-       per_cpu(idt_desc, cpu) = *desc;
-
        count = (desc->size+1) / 8;
        BUG_ON(count > 256);
 
-       spin_lock(&lock);
        for (in = out = 0; in < count; in++) {
                const u32 *entry = (u32 *)(desc->address + in * 8);
 
@@ -371,6 +448,28 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
                        out++;
        }
        traps[out].address = 0;
+}
+
+void xen_copy_trap_info(struct trap_info *traps)
+{
+       const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
+
+       xen_convert_trap_info(desc, traps);
+}
+
+/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
+   hold a spinlock to protect the static traps[] array (static because
+   it avoids allocation, and saves stack space). */
+static void xen_load_idt(const struct Xgt_desc_struct *desc)
+{
+       static DEFINE_SPINLOCK(lock);
+       static struct trap_info traps[257];
+
+       spin_lock(&lock);
+
+       __get_cpu_var(idt_desc) = *desc;
+
+       xen_convert_trap_info(desc, traps);
 
        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
@@ -384,6 +483,8 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
 static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                                u32 low, u32 high)
 {
+       preempt_disable();
+
        switch ((high >> 8) & 0xff) {
        case DESCTYPE_LDT:
        case DESCTYPE_TSS:
@@ -400,10 +501,12 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
        }
 
        }
+
+       preempt_enable();
 }
 
 static void xen_load_esp0(struct tss_struct *tss,
-                                  struct thread_struct *thread)
+                         struct thread_struct *thread)
 {
        struct multicall_space mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
@@ -428,25 +531,84 @@ static unsigned long xen_apic_read(unsigned long reg)
 {
        return 0;
 }
+
+static void xen_apic_write(unsigned long reg, unsigned long val)
+{
+       /* Warn to see if there are any stray references */
+       WARN_ON(1);
+}
 #endif
 
 static void xen_flush_tlb(void)
 {
-       struct mmuext_op op;
+       struct mmuext_op *op;
+       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 
-       op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
+       op = mcs.args;
+       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static void xen_flush_tlb_single(unsigned long addr)
 {
-       struct mmuext_op op;
+       struct mmuext_op *op;
+       struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 
-       op.cmd = MMUEXT_INVLPG_LOCAL;
-       op.arg1.linear_addr = addr & PAGE_MASK;
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
+       op = mcs.args;
+       op->cmd = MMUEXT_INVLPG_LOCAL;
+       op->arg1.linear_addr = addr & PAGE_MASK;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
+                                unsigned long va)
+{
+       struct {
+               struct mmuext_op op;
+               cpumask_t mask;
+       } *args;
+       cpumask_t cpumask = *cpus;
+       struct multicall_space mcs;
+
+       /*
+        * A couple of (to be removed) sanity checks:
+        *
+        * - current CPU must not be in mask
+        * - mask must exist :)
+        */
+       BUG_ON(cpus_empty(cpumask));
+       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+       BUG_ON(!mm);
+
+       /* If a CPU which we ran on has gone down, OK. */
+       cpus_and(cpumask, cpumask, cpu_online_map);
+       if (cpus_empty(cpumask))
+               return;
+
+       mcs = xen_mc_entry(sizeof(*args));
+       args = mcs.args;
+       args->mask = cpumask;
+       args->op.arg2.vcpumask = &args->mask;
+
+       if (va == TLB_FLUSH_ALL) {
+               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+       } else {
+               args->op.cmd = MMUEXT_INVLPG_MULTI;
+               args->op.arg1.linear_addr = va;
+       }
+
+       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
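
All three TLB operations above share one batching pattern: reserve space in
the per-cpu multicall buffer, fill in the mmuext_op, attach it with
MULTI_mmuext_op(), then issue (or keep batching under lazy MMU mode).  A
condensed sketch of that lifecycle; example_mmuext_flush() is hypothetical,
not part of the patch:

	/* Hypothetical helper showing the multicall pattern used above. */
	static void example_mmuext_flush(unsigned int cmd)
	{
		struct mmuext_op *op;
		struct multicall_space mcs = xen_mc_entry(sizeof(*op));

		op = mcs.args;		/* argument space inside the batch */
		op->cmd = cmd;
		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

		/* Flush now, unless called in lazy MMU mode. */
		xen_mc_issue(PARAVIRT_LAZY_MMU);
	}
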
+
+static void xen_write_cr2(unsigned long cr2)
+{
+       x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
 }
 
 static unsigned long xen_read_cr2(void)
@@ -454,24 +616,17 @@ static unsigned long xen_read_cr2(void)
        return x86_read_percpu(xen_vcpu)->arch.cr2;
 }
 
+static unsigned long xen_read_cr2_direct(void)
+{
+       return x86_read_percpu(xen_vcpu_info.arch.cr2);
+}
+
 static void xen_write_cr4(unsigned long cr4)
 {
        /* never allow TSC to be disabled */
        native_write_cr4(cr4 & ~X86_CR4_TSD);
 }
 
-/*
- * Page-directory addresses above 4GB do not fit into architectural %cr3.
- * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
- * must use the following accessor macros to pack/unpack valid MFNs.
- *
- * Note that Xen is using the fact that the pagetable base is always
- * page-aligned, and putting the 12 MSB of the address into the 12 LSB
- * of cr3.
- */
-#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
-#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
-
 static unsigned long xen_read_cr3(void)
 {
        return x86_read_percpu(xen_cr3);
@@ -479,6 +634,8 @@ static unsigned long xen_read_cr3(void)
 
 static void xen_write_cr3(unsigned long cr3)
 {
+       BUG_ON(preemptible());
+
        if (cr3 == x86_read_percpu(xen_cr3)) {
                /* just a simple tlb flush */
                xen_flush_tlb();
@@ -505,7 +662,7 @@ static void xen_write_cr3(unsigned long cr3)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
 {
        BUG_ON(mem_map);        /* should only be used early */
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
@@ -557,10 +714,32 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+
+       return pte;
+}
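
The masking expression is terse; a worked example of both cases,
illustrative only, assuming the usual i386 value _PAGE_RW == 0x002:

	/*
	 * Existing pte read-only:
	 *   (old & _PAGE_RW) | ~_PAGE_RW  ==  0x0 | ~0x2  ==  ~0x2
	 *   new & ~0x2   ->  _PAGE_RW stripped from the new pte
	 * Existing pte writable:
	 *   (old & _PAGE_RW) | ~_PAGE_RW  ==  0x2 | ~0x2  ==  ~0 (all ones)
	 *   new & ~0     ->  new pte passes through unchanged
	 */
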
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+       pte = mask_rw_pte(ptep, pte);
+
+       xen_set_pte(ptep, pte);
+}
+
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
 
+       /* special set_pte for pagetable initialization */
+       paravirt_ops.set_pte = xen_set_pte_init;
+
        init_mm.pgd = base;
        /*
         * copy top-level of Xen-supplied pagetable into place.  For
@@ -607,6 +786,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
        paravirt_ops.alloc_pt = xen_alloc_pt;
+       paravirt_ops.set_pte = xen_set_pte;
 
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /*
@@ -638,8 +818,81 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
                if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                        BUG();
        }
+}
 
-       xen_vcpu_setup(smp_processor_id());
+/* This is called once we have the cpu_possible_map */
+void __init xen_setup_vcpu_info_placement(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               xen_vcpu_setup(cpu);
+
+       /* If xen_vcpu_setup managed to place the vcpu_info within the
+          percpu area for all cpus, make use of it. */
+       if (have_vcpu_info_placement) {
+               printk(KERN_INFO "Xen: using vcpu_info placement\n");
+
+               paravirt_ops.save_fl = xen_save_fl_direct;
+               paravirt_ops.restore_fl = xen_restore_fl_direct;
+               paravirt_ops.irq_disable = xen_irq_disable_direct;
+               paravirt_ops.irq_enable = xen_irq_enable_direct;
+               paravirt_ops.read_cr2 = xen_read_cr2_direct;
+               paravirt_ops.iret = xen_iret_direct;
+       }
+}
+
+static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
+                         unsigned long addr, unsigned len)
+{
+       char *start, *end, *reloc;
+       unsigned ret;
+
+       start = end = reloc = NULL;
+
+#define SITE(x)                                                                \
+       case PARAVIRT_PATCH(x):                                         \
+       if (have_vcpu_info_placement) {                                 \
+               start = (char *)xen_##x##_direct;                       \
+               end = xen_##x##_direct_end;                             \
+               reloc = xen_##x##_direct_reloc;                         \
+       }                                                               \
+       goto patch_site
+
+       switch (type) {
+               SITE(irq_enable);
+               SITE(irq_disable);
+               SITE(save_fl);
+               SITE(restore_fl);
+#undef SITE
+
+       patch_site:
+               if (start == NULL || (end-start) > len)
+                       goto default_patch;
+
+               ret = paravirt_patch_insns(insnbuf, len, start, end);
+
+               /* Note: because reloc is assigned from something that
+                  appears to be an array, gcc assumes it's non-null,
+                  but doesn't know its relationship with start and
+                  end. */
+               if (reloc > start && reloc < end) {
+                       int reloc_off = reloc - start;
+                       long *relocp = (long *)(insnbuf + reloc_off);
+                       long delta = start - (char *)addr;
+
+                       *relocp += delta;
+               }
+               break;
+
+       default_patch:
+       default:
+               ret = paravirt_patch_default(type, clobbers, insnbuf,
+                                            addr, len);
+               break;
+       }
+
+       return ret;
 }
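
For readability, the mechanical expansion of one SITE() invocation above,
SITE(irq_enable), shown only for illustration:

	case PARAVIRT_PATCH(irq_enable):
		if (have_vcpu_info_placement) {
			start = (char *)xen_irq_enable_direct;
			end = xen_irq_enable_direct_end;
			reloc = xen_irq_enable_direct_reloc;
		}
		goto patch_site;
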
 
 static const struct paravirt_ops xen_paravirt_ops __initdata = {
@@ -649,7 +902,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .name = "Xen",
        .banner = xen_banner,
 
-       .patch = paravirt_patch_default,
+       .patch = xen_patch,
 
        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,
@@ -660,7 +913,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .set_wallclock = xen_set_wallclock,
        .get_wallclock = xen_get_wallclock,
        .get_cpu_khz = xen_cpu_khz,
-       .sched_clock = xen_clocksource_read,
+       .sched_clock = xen_sched_clock,
 
        .cpuid = xen_cpuid,
 
@@ -673,7 +926,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .write_cr0 = native_write_cr0,
 
        .read_cr2 = xen_read_cr2,
-       .write_cr2 = native_write_cr2,
+       .write_cr2 = xen_write_cr2,
 
        .read_cr3 = xen_read_cr3,
        .write_cr3 = xen_write_cr3,
@@ -717,8 +970,8 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .io_delay = xen_io_delay,
 
 #ifdef CONFIG_X86_LOCAL_APIC
-       .apic_write = paravirt_nop,
-       .apic_write_atomic = paravirt_nop,
+       .apic_write = xen_apic_write,
+       .apic_write_atomic = xen_apic_write,
        .apic_read = xen_apic_read,
        .setup_boot_clock = paravirt_nop,
        .setup_secondary_clock = paravirt_nop,
@@ -728,6 +981,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,
+       .flush_tlb_others = xen_flush_tlb_others,
 
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
@@ -745,7 +999,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .kmap_atomic_pte = xen_kmap_atomic_pte,
 #endif
 
-       .set_pte = xen_set_pte,
+       .set_pte = NULL,        /* see xen_pagetable_setup_* */
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd,
 
@@ -773,6 +1027,59 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .set_lazy_mode = xen_set_lazy_mode,
 };
 
+#ifdef CONFIG_SMP
+static const struct smp_ops xen_smp_ops __initdata = {
+       .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+       .smp_prepare_cpus = xen_smp_prepare_cpus,
+       .cpu_up = xen_cpu_up,
+       .smp_cpus_done = xen_smp_cpus_done,
+
+       .smp_send_stop = xen_smp_send_stop,
+       .smp_send_reschedule = xen_smp_send_reschedule,
+       .smp_call_function_mask = xen_smp_call_function_mask,
+};
+#endif /* CONFIG_SMP */
+
+static void xen_reboot(int reason)
+{
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+
+       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+               BUG();
+}
+
+static void xen_restart(char *msg)
+{
+       xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_emergency_restart(void)
+{
+       xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_machine_halt(void)
+{
+       xen_reboot(SHUTDOWN_poweroff);
+}
+
+static void xen_crash_shutdown(struct pt_regs *regs)
+{
+       xen_reboot(SHUTDOWN_crash);
+}
+
+static const struct machine_ops __initdata xen_machine_ops = {
+       .restart = xen_restart,
+       .halt = xen_machine_halt,
+       .power_off = xen_machine_halt,
+       .shutdown = xen_machine_halt,
+       .crash_shutdown = xen_crash_shutdown,
+       .emergency_restart = xen_emergency_restart,
+};
+
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -785,6 +1092,11 @@ asmlinkage void __init xen_start_kernel(void)
 
        /* Install Xen paravirt ops */
        paravirt_ops = xen_paravirt_ops;
+       machine_ops = xen_machine_ops;
+
+#ifdef CONFIG_SMP
+       smp_ops = xen_smp_ops;
+#endif
 
        xen_setup_features();
 
@@ -801,7 +1113,16 @@ asmlinkage void __init xen_start_kernel(void)
        /* keep using Xen gdt for now; no urgent need to change it */
 
        x86_write_percpu(xen_cr3, __pa(pgd));
-       xen_vcpu_setup(0);
+
+#ifdef CONFIG_SMP
+       /* Don't do the full vcpu_info placement stuff until we have a
+          possible map. */
+       per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+#else
+       /* May as well do it now, since there's no good time to call
+          it later on UP. */
+       xen_setup_vcpu_info_placement();
+#endif
 
        paravirt_ops.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))