/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Felix Koop	: NR_CPUS used properly
 *	Jose Renau	: Handle single CPU case.
 *	Alan Cox	: By repeated request 8) - Total BogoMIP report.
 *	Greg Wright	: Fix for kernel stacks panic.
 *	Erich Boleyn	: MP v1.4 and additional changes.
 *	Matthias Sattler	: Changes for 2.1 kernel map.
 *	Michel Lespinasse	: Changes for 2.1 kernel map.
 *	Michael Chastain	: Change trampoline.S to gnu as.
 *	Alan Cox	: Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	: Added APIC timers, based on code
 *	Ingo Molnar	: various cleanups and rewrites
 *	Tigran Aivazian	: fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	: Bits for genuine 82489DX APICs
 *	Andi Kleen	: Changed for SMP boot into long mode.
 *	Rusty Russell	: Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	: Converted to new state machine.
 *
 *	Probably mostly hotplug CPU ready now.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>

#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
/* Change for real CPU hotplug. Note other files need to be
   fixed first, too. */
#define __cpuinit __init
#define __cpuinitdata __initdata
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
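/*
 * Boot handshake, as used below: do_boot_cpu() on the BP sets the
 * target's bit in cpu_callout_map once the INIT/STARTUP sequence has
 * been sent; the AP spins in smp_callin() until it sees that bit,
 * finishes its local setup and then sets its own bit in cpu_callin_map,
 * which is what the BP waits for before declaring the CPU booted.
 */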
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
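/*
 * Note that the trampoline page has to sit below 1MB and be 4K aligned:
 * the AP starts executing it in real mode, and the STARTUP IPI can only
 * carry a page number (start_rip >> 12, see wakeup_secondary_via_INIT()
 * below) as the entry point.
 */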
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * Main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc (the IA64
 * counterpart of the TSC) value it has and to read its own itc before
 * and after the master responds. Each iteration gives us three
 * timestamps:
 *
 *	t0: the slave's TSC just before the request,
 *	tm: the TSC value the master reports, and
 *	t1: the slave's TSC just after it sees the master's reply.
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
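 *
 * A worked illustration (made-up numbers, not a measurement): if the
 * slave reads t0 = 1000, the master reports tm = 1600 and the slave
 * then reads t1 = 1010, the midpoint of the slave's readings is
 * (t0 + t1)/2 = 1005, so the slave concludes its TSC is
 * 1005 - 1600 = -595 cycles behind the master and adds 595 to it;
 * the error of that estimate is bounded by the 10-cycle round trip.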
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims a 153 cycle error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)
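/*
 * MASTER and SLAVE index into the go[] flag array declared below.
 * SLAVE sits a full cache line past MASTER (go[] holds 8-byte longs,
 * hence SMP_CACHE_BYTES/8), so the two CPUs polling their respective
 * flags never bounce the same cache line between them.
 */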
/* Intentionally don't use cpu_relax() during TSC synchronization
   because we don't want to go into funky power-saving modes or cause
   hypervisors to schedule us away. Going to sleep would likely affect
   latency and low latency is the primary objective here. -AK */
#define no_cpu_relax() barrier()

static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	if (smp_processor_id() != boot_cpu_id)
		return;

	local_irq_save(flags);

	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {

	local_irq_restore(flags);
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		while (!(tm = go[SLAVE]))
			no_cpu_relax();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
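	/* e.g. (illustrative numbers) best_t0 = 7, best_t1 = 9:
	   7/2 + 9/2 = 3 + 4 = 7, and since both halves dropped a
	   remainder we add 1 below to get the true average, 8 */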
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;

	return tcenter - best_tm;
}
static __cpuinit void sync_tsc(void)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;

	smp_call_function(sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);

	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
			done = 1;	/* let's lock on to this... */

			adjust_latency += -delta;
			adj = -delta + adjust_latency/4;

			wrmsrl(MSR_IA32_TSC, t + adj);

		t[i].master = master_time_stamp;
		t[i].lat = adjust_latency/4;

	spin_unlock_irqrestore(&tsc_sync_lock, flags);

	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), boot_cpu_id, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	if (notscsync || !cpu_has_tsc)
		return;
	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
			boot_cpu_id);

static __init int notscsync_setup(char *s)

__setup("notscsync", notscsync_setup);
static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/* (This works even if the APIC is not enabled.) */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
		      phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates the time the
	 * boot CPU spends sending the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/* Waiting 2s total for startup (udelay is not yet working) */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/* Has the boot CPU finished its STARTUP sequence? */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
		      cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC (this is probably redundant on most
	 * boards).
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");

	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/* Save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* Allow the master to continue. */
	cpu_set(cpuid, cpu_callin_map);
}
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting
	 * is so fragile that we want to limit the things done
	 * here to the most necessary things.
	 */

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
	}

	/* Allow the master to continue. */
	cpu_set(smp_processor_id(), cpu_online_map);

	/* Wait for TSC sync here so that we don't schedule anything before
	   the TSCs are synchronized. We still process interrupts, which
	   could see an inconsistent time in that window, unfortunately. */
	tsc_sync_wait();
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);

/*
 * Kick the secondary to wake up.
 */
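/*
 * The wakeup below follows the classic INIT-SIPI-SIPI dance: assert
 * INIT (level triggered) at the target APIC, deassert it, and then,
 * on integrated APICs, send up to two STARTUP IPIs whose vector field
 * carries the page number of the trampoline (start_rip >> 12).
 * send_status tracks whether the ICR went idle again; accept_status
 * collects any error the target reported in its ESR.
 */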
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/* Turn INIT on target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				  | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	Dprintk("Deasserting INIT.\n");

	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/* Run STARTUP IPI loop. */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);

		Dprintk("After apic_write.\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					  | (start_rip >> 12));

		/* Give the other CPU some time to accept the IPI. */

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI. */
		/* Due to the Pentium erratum 3AP. */
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);

		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;

	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(idle);
	}

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");
	CMOS_WRITE(0xa, 0xf);

	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;

	/* Be paranoid about clearing APIC errors. */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}

	/*
	 * Status is now clean
	 */

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	/*
	 * allow APs to start initializing.
	 */
	Dprintk("Before Callout %d.\n", cpu);
	cpu_set(cpu, cpu_callout_map);
	Dprintk("After Callout %d.\n", cpu);

	/* Wait 5s total for a response */
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
	}

	if (cpu_isset(cpu, cpu_callin_map)) {
		/* number CPUs logically, starting from 1 (BSP is 0) */
		Dprintk("CPU has booted.\n");
	} else {
		if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
			/* trampoline started but...? */
			printk("Stuck ??\n");
		else
			/* trampoline code not run */
			printk("Not responding.\n");

		inquire_remote_apic(apicid);
	}

	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
	}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Construct cpu_sibling_map[], so that we can tell the sibling CPU
 * on SMT systems efficiently.
 */
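/*
 * For example, on a hypothetical box where logical CPUs 0 and 1 are the
 * two HT siblings of one core and CPUs 2 and 3 those of another core in
 * the same package, the loops below end up with
 * cpu_sibling_map[0] = { 0, 1 } and cpu_core_map[0] = { 0, 1, 2, 3 }.
 */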
static __cpuinit void detect_siblings(void)
{
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	for_each_online_cpu (cpu) {
		struct cpuinfo_x86 *c = cpu_data + cpu;

		if (smp_num_siblings > 1) {
			for_each_online_cpu (i) {
				if (cpu_core_id[cpu] == cpu_core_id[i]) {
					cpu_set(i, cpu_sibling_map[cpu]);

		cpu_set(cpu, cpu_sibling_map[cpu]);

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING
			       "WARNING: %d siblings found for CPU%d, should be %d\n",
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}
		if (c->x86_num_cores > 1) {
			for_each_online_cpu(i) {
				if (phys_proc_id[cpu] == phys_proc_id[i])
					cpu_set(i, cpu_core_map[cpu]);

		cpu_core_map[cpu] = cpu_sibling_map[cpu];
/* Cleanup possible dangling ends... */
static __cpuinit void smp_cleanup_boot(void)
{
	/* Paranoid: Set warm reset code and vector here back
	   to default values. */

	/* Reset trampoline flag */
	*((volatile int *) phys_to_virt(0x467)) = 0;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Free pages reserved for SMP bootup.
	 * When you add hotplug CPU support later, remove this.
	 * Note there is more work to be done for later CPU bootup.
	 */
	free_page((unsigned long) __va(PAGE_SIZE));
	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}

/*
 * Fall back to non-SMP mode after errors.
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __cpuinit void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}

/* Handle user cpus=... parameter. */
static __cpuinit void enforce_max_cpus(unsigned max_cpus)
{
	int i, k = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		if (++k > max_cpus) {
			cpu_clear(i, cpu_possible_map);
			cpu_clear(i, cpu_present_map);
		}
	}
}
/* Various sanity checks. */
static int __cpuinit smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/* If we couldn't find an SMP configuration at boot time,
	   get out of here now! */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/* Should not be necessary because the MP table should list the boot
	   CPU too, but we do it for the sake of robustness anyway. */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/* If we couldn't find a local APIC, then get out of here now! */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
		       boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
	}

	/* If SMP should be disabled, then really disable it! */
	printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */

	enforce_max_cpus(max_cpus);

	/* Fill in cpu_present_mask */
	for (i = 0; i < NR_CPUS; i++) {
		int apicid = cpu_present_to_apicid(i);
		if (physid_isset(apicid, phys_cpu_present_map)) {
			cpu_set(i, cpu_present_map);
			/* possible map would be different if we supported real
			   CPU hotplug. */
			cpu_set(i, cpu_possible_map);
		}
	}

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
	}

	/* Switch from PIC to APIC mode. */
	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/* Now start the IO-APICs */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();

	/* Set up local APIC timer on boot CPU. */
	setup_boot_APIC_clock();
/* Early setup to make printk work. */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
}
/*
 * Entry point to boot a CPU.
 * This is all __cpuinit, not __devinit for now because we don't support
 * CPU hotplug yet.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);
	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	return 0;
}
/* Finish the SMP boot. */
void __cpuinit smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	check_nmi_watchdog();
}