#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
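
/*
 * Illustrative sketch (not part of this file's interface; the helper
 * name is hypothetical): callers that may run either before or after
 * setup_per_cpu_areas() can go through the early_per_cpu() accessor,
 * which reads the static early map while early_per_cpu_ptr() is still
 * non-NULL and the live per-cpu copy afterwards.
 */
static inline u16 example_cpu_to_apicid(int cpu)
{
	/* resolves to the early map or the per-cpu copy, as appropriate */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}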

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* used by the NUMA code later in this file */

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
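
/*
 * Note (editorial): from this point on, the early_per_cpu_ptr()
 * pointers are NULL, so early_per_cpu() accessors transparently
 * resolve to the per-cpu copies populated above.
 */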

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		/* boot CPU keeps its static pda, so only nr_cpu_ids - 1 entries */
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;	/* freeable later */
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}

#endif /* CONFIG_SMP && CONFIG_X86_64 */
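
/*
 * Sketch of the single bootmem block built by setup_cpu_pda_map():
 *
 *   new_cpu_pda -> [ ptr 0 | ptr 1 | ... | ptr N-1 | pad ]   <- tsize
 *   pda --------->  [ pda for cpu 1 | pda for cpu 2 | ... ]   <- asize
 *
 * The pointer table is rounded up to a cache line, and only
 * nr_cpu_ids - 1 pda slots follow it because cpu 0 keeps its
 * statically allocated pda.
 */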

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 *   Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
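
/*
 * Illustrative sketch (hypothetical helper, not kernel API): the offset
 * recorded by setup_per_cpu_areas() is all that per_cpu() needs; a
 * per-cpu variable access reduces to "address of the original
 * .data.percpu symbol plus the owning CPU's offset".
 */
static inline void *example_percpu_addr(void *pcpu_sym, int cpu)
{
	return (char *)pcpu_sym + per_cpu_offset(cpu);
}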

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
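
/*
 * Illustrative sketch (hypothetical helper): once node_to_cpumask_map
 * is in place, per-node CPU queries are plain cpumask operations.
 */
static inline int example_nr_cpus_on_node(int node)
{
	return cpus_weight(node_to_cpumask_map[node]);
}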

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
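
/*
 * Illustrative contrast for callers (hypothetical usage): prefer the
 * pointer-returning form, which avoids copying an NR_CPUS-sized mask
 * onto the caller's stack:
 *
 *	const cpumask_t *p = cpumask_of_node(node);	(no copy)
 *	cpumask_t m = node_to_cpumask(node);		(full copy)
 */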

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */