#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern int smp_num_cpus;
-#define cpu_possible_map       cpu_present_map
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi(cpumask_t mask);
 
                flags |= 0x00040000UL; /* "remain halted" */
                *pflags = flags;
                cpu_clear(cpuid, cpu_present_map);
+               cpu_clear(cpuid, cpu_possible_map);
                halt();
        }
 #endif
 #ifdef CONFIG_SMP
        /* Wait for the secondaries to halt. */
        cpu_clear(boot_cpuid, cpu_present_map);
+       cpu_clear(boot_cpuid, cpu_possible_map);
        while (cpus_weight(cpu_present_map))
                barrier();
 #endif
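
The two hunks above form a shutdown handshake: each secondary drops itself out of both masks and halts, while the boot CPU spins until cpu_present_map drains. A condensed sketch, with invented function names (secondary_retire, boot_cpu_wait_for_secondaries) wrapping the patch's own mask calls:

static void secondary_retire(int cpuid)
{
	cpu_clear(cpuid, cpu_present_map);	/* no longer running */
	cpu_clear(cpuid, cpu_possible_map);	/* cannot be brought back */
	halt();
}

static void boot_cpu_wait_for_secondaries(void)
{
	/* barrier() is a compiler barrier: it forces cpu_present_map to
	 * be re-read on every pass instead of cached in a register. */
	while (cpus_weight(cpu_present_map))
		barrier();
}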
 
 /* Set to a secondary's cpuid when it comes online.  */
 static int smp_secondary_alive __devinitdata = 0;
 
-/* Which cpus ids came online.  */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
 int smp_num_probed;            /* Internal processor count */
 int smp_num_cpus = 1;          /* Number that came online.  */
 EXPORT_SYMBOL(smp_num_cpus);
                                ((char *)cpubase + i*hwrpb->processor_size);
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
+                               cpu_set(i, cpu_possible_map);
                                cpu_set(i, cpu_present_map);
                                cpu->pal_revision = boot_cpu_palrev;
                        }
 
        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
+               cpu_possible_map = cpumask_of_cpu(boot_cpuid);
                cpu_present_map = cpumask_of_cpu(boot_cpuid);
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
 
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
-/*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
 
 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
 
 #include <linux/cpumask.h>
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_possible_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 
        default y
        select HAVE_IDE
        select HAVE_OPROFILE
+       select INIT_ALL_POSSIBLE
 
 config SBUS
        bool
 
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_bootout_map;
 cpumask_t cpu_bootin_map;
 static cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
 
 #define SMP_RESCHEDULE_YOURSELF        0x1     /* XXX braindead */
 #define SMP_CALL_FUNCTION      0x2
 
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map       phys_cpu_present_map
-
 extern void asmlinkage smp_bootstrap(void);
 
 /*
 
 
        for (i = 1; i < NR_CPUS; i++) {
                if (amon_cpu_avail(i)) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i]     = ++ncpu;
                        __cpu_logical_map[ncpu] = i;
                }
 
                write_vpe_c0_vpeconf0(tmp);
 
                /* Record this as available CPU */
-               cpu_set(tc, phys_cpu_present_map);
+               cpu_set(tc, cpu_possible_map);
                __cpu_number_map[tc]    = ++ncpu;
                __cpu_logical_map[ncpu] = tc;
        }
 
 #include <asm/mipsmtregs.h>
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-cpumask_t phys_cpu_present_map;                /* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;     /* Bitmask of started secondaries */
-cpumask_t cpu_online_map;              /* Bitmask of currently online CPUs */
 int __cpu_number_map[NR_CPUS];         /* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];                /* Map logical to physical */
 
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
-
 extern void cpu_idle(void);
 
 /* Number of TCs (or siblings in Intel speak) per CPU core */
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-       cpu_set(0, phys_cpu_present_map);
+       cpu_set(0, cpu_possible_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
 }
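
For orientation, the same function restated with what each mask means in this patch's scheme (a hedged annotation, not a change to the hunk):

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(0, cpu_possible_map);	/* CPU 0 may ever be brought up */
	cpu_set(0, cpu_online_map);	/* CPU 0 is up and schedulable */
	cpu_set(0, cpu_callin_map);	/* CPU 0 has completed SMP call-in */
}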
 
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
         */
        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-               cpu_set(i, phys_cpu_present_map);
+               cpu_set(i, cpu_possible_map);
                __cpu_number_map[i] = i;
                __cpu_logical_map[i] = i;
        }
         * Pull any physically present but unused TCs out of circulation.
         */
        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-               cpu_clear(tc, phys_cpu_present_map);
+               cpu_clear(tc, cpu_possible_map);
                cpu_clear(tc, cpu_present_map);
                tc++;
        }
 
 }
 
 /*
- * Detect available CPUs, populate phys_cpu_present_map before smp_init
+ * Detect available CPUs, populate cpu_possible_map before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
 {
        int i;
 
-       cpus_clear(phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
 
        for (i = 0; i < 2; i++) {
-               cpu_set(i, phys_cpu_present_map);
+               cpu_set(i, cpu_possible_map);
                __cpu_number_map[i]     = i;
                __cpu_logical_map[i]    = i;
        }
 
                        /* Only let it join in if it's marked enabled */
                        if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
                            (tot_cpus_found != NR_CPUS)) {
-                               cpu_set(cpuid, phys_cpu_present_map);
+                               cpu_set(cpuid, cpu_possible_map);
                                alloc_cpupda(cpuid, tot_cpus_found);
                                cpus_found++;
                                tot_cpus_found++;
 
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
 {
        int i, num;
 
-       cpus_clear(phys_cpu_present_map);
-       cpu_set(0, phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
+       cpu_set(0, cpu_possible_map);
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
 
        for (i = 1, num = 0; i < NR_CPUS; i++) {
                if (cfe_cpu_stop(i) == 0) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i] = ++num;
                        __cpu_logical_map[num] = i;
                }
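
Worth spelling out in the hunk above: cfe_cpu_stop(i) doubles as the presence probe here, since it only succeeds (returns 0) for CPUs the CFE firmware knows about. A restated sketch of the loop with that assumption annotated:

	for (i = 1, num = 0; i < NR_CPUS; i++) {
		if (cfe_cpu_stop(i) == 0) {	/* CFE parked CPU i, so it exists */
			cpu_set(i, cpu_possible_map);
			__cpu_number_map[i] = ++num;	/* dense logical numbering */
			__cpu_logical_map[num] = i;
		}
	}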
 
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
 {
        int i, num;
 
-       cpus_clear(phys_cpu_present_map);
-       cpu_set(0, phys_cpu_present_map);
+       cpus_clear(cpu_possible_map);
+       cpu_set(0, cpu_possible_map);
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
 
        for (i = 1, num = 0; i < NR_CPUS; i++) {
                if (cfe_cpu_stop(i) == 0) {
-                       cpu_set(i, phys_cpu_present_map);
+                       cpu_set(i, cpu_possible_map);
                        __cpu_number_map[i] = ++num;
                        __cpu_logical_map[num] = i;
                }
 
        select HAVE_OPROFILE
        select RTC_CLASS
        select RTC_DRV_PARISC
+       select INIT_ALL_POSSIBLE
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
          in many of their workstations & servers (HP9000 700 and 800 series,
 
 
 static int parisc_max_cpus __read_mostly = 1;
 
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu 
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
-cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;      /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;       /* Bitmap of Present CPUs */
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
 enum ipi_message_type {
 
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 
        select HAVE_KRETPROBES
        select HAVE_KVM if 64BIT
        select HAVE_ARCH_TRACEHOOK
+       select INIT_ALL_POSSIBLE
 
 source "init/Kconfig"
 
 
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
 static struct task_struct *current_set[NR_CPUS];
 
 static u8 smp_cpu_type;
 
 int __cpu_number_map[NR_CPUS];         /* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];                /* Map logical to physical */
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
        struct sh_cpuinfo *c = cpu_data + cpu;
 
  */
 
 extern unsigned char boot_cpu_id;
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map phys_cpu_present_map
 
 typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                       unsigned long, unsigned long);
 
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* The only guaranteed locking primitive available on all Sparc
        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
-                       cpu_set(mid, phys_cpu_present_map);
+                       cpu_set(mid, cpu_possible_map);
                        cpu_set(mid, cpu_present_map);
                }
                instance++;
 
        current_thread_info()->cpu = cpuid;
        cpu_set(cpuid, cpu_online_map);
-       cpu_set(cpuid, phys_cpu_present_map);
+       cpu_set(cpuid, cpu_possible_map);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 
 #ifdef CONFIG_SMP
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
-
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
 #endif
 
 EXPORT_SYMBOL(__udelay);
 
 
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 
 #include "irq_user.h"
 #include "os.h"
 
-/* CPU online map, set by smp_boot_cpus */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
  * started and never changed.
 
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 
 /* Used for the invalidate map that's also checked in the spinlock */
 static volatile unsigned long smp_invalidate_needed;
 
-/* Bitmask of currently online CPUs - used by setup.c for
-   /proc/cpuinfo, visible externally but still physical */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_present_map;
 
 static __inline__ int hard_smp_processor_id(void)
 {
 
 
 endif # MODULES
 
+config INIT_ALL_POSSIBLE
+       bool
+       help
+         Back when each arch used to define their own cpu_online_map and
+         cpu_possible_map, some of them chose to initialize cpu_possible_map
+         with all 1s, and others with all 0s.  When they were centralised,
+         it was better to provide this option than to break all the archs
+         and have several arch maintainers pursuing me down dark alleys.
+
 config STOP_MACHINE
        bool
        default y
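
To make the option's effect concrete: iterators like for_each_possible_cpu() walk cpu_possible_map, so its initial value decides what generic code sees before an arch probes anything. A minimal sketch with a hypothetical consumer (count_possible_cpus is ours, not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/* With INIT_ALL_POSSIBLE selected, cpu_possible_map starts as
 * CPU_MASK_ALL and this loop visits every CPU slot; without it, the
 * mask starts empty until the arch cpu_set()s the CPUs it finds. */
static int __init count_possible_cpus(void)
{
	int cpu, n = 0;

	for_each_possible_cpu(cpu)
		n++;
	printk(KERN_INFO "%d possible CPUs\n", n);
	return n;
}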
 
 cpumask_t cpu_present_map __read_mostly;
 EXPORT_SYMBOL(cpu_present_map);
 
-#ifndef CONFIG_SMP
-
 /*
  * Represents all CPUs that are currently online.
  */
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
 
+#ifdef CONFIG_INIT_ALL_POSSIBLE
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+#else
+cpumask_t cpu_possible_map __read_mostly;
+#endif
 EXPORT_SYMBOL(cpu_possible_map);
 
-#else /* CONFIG_SMP */
-
+#ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_map, cpu_present_map */
 static DEFINE_MUTEX(cpu_add_remove_lock);
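
With the maps centralized, their writers are serialized here as well. A minimal sketch of an updater holding cpu_add_remove_lock (example_set_cpu_online is hypothetical; kernels of this era actually take the mutex through the cpu_maps_update_begin()/cpu_maps_update_done() wrappers in this same file):

/* Publish or retire a CPU in the centralized online map while
 * holding the mutex declared in the hunk above. */
static void example_set_cpu_online(unsigned int cpu, bool online)
{
	mutex_lock(&cpu_add_remove_lock);
	if (online)
		cpu_set(cpu, cpu_online_map);
	else
		cpu_clear(cpu, cpu_online_map);
	mutex_unlock(&cpu_add_remove_lock);
}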