#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
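/*
 * The "early" per-cpu variables above are backed by static
 * NR_CPUS-sized arrays until the real per-cpu areas exist; accesses
 * go through early_per_cpu_ptr()/early_per_cpu_map().
 * setup_per_cpu_maps() below copies the values into the per-cpu
 * variables and then NULLs the early pointers.
 */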
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */
DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);
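/*
 * node_to_cpumask_map is left NULL here; setup_node_to_cpumask_map()
 * allocates it at boot, sized by nr_node_ids rather than MAX_NUMNODES,
 * so kernels built with a large CONFIG_NODES_SHIFT do not pay for
 * nodes that are not actually present.
 */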
/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */
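/*
 * These masks are cpumask_var_t, so with CONFIG_CPUMASK_OFFSTACK the
 * calls above allocate nr_cpu_ids-sized storage from bootmem; without
 * it alloc_bootmem_cpumask_var() is effectively a no-op and the masks
 * live inline.
 */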
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
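/*
 * From here on the early_per_cpu() accessors, which test the early
 * pointer first, fall through to the regular per-cpu variables filled
 * in above.
 */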
#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
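/*
 * On x86_64 in this tree the per-cpu symbols are zero-based, so the
 * boot CPU can run with %gs pointing at the initial load area
 * (__per_cpu_load) until setup_per_cpu_areas() gives it its own copy;
 * hence the [0] initializer.
 */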
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				    __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					    __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * Up to this point, CPU0 has been using .data.init
		 * area.  Reload %gs offset for CPU0.
		 */
		if (cpu == boot_cpu_id)
			load_gs_base(cpu);
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
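/*
 * Everything below maintains the cpu -> node mappings and the
 * node -> cpumask map for NUMA; it is only built for 64-bit NUMA
 * kernels (see the X86_64_NUMA define above).
 */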
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
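/*
 * alloc_bootmem_low() returns zeroed memory, so every node starts out
 * with an empty cpumask; numa_add_cpu() sets the bits as CPUs are
 * brought up.
 */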
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		per_cpu(node_number, cpu) = node;
}
void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */
/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}
void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
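/*
 * The debug variants above verify that the map exists and log every
 * update together with the resulting mask; the
 * !CONFIG_DEBUG_PER_CPU_MAPS versions earlier in the file reduce to a
 * single cpu_set()/cpu_clear().
 */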
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
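/*
 * Falling back to cpu_online_map when the map is missing trades
 * precision for safety: callers get an overly broad but usable mask
 * instead of a NULL dereference.
 */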
/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
		       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */