/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS	64
#define NR_MAG		6

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
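
/*
 * Layout of the topology information block returned by stsi 15.1.2
 * (struct tl_info) and of the container and CPU entries it contains.
 */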
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;
	struct tl_cpu cpu;
	struct tl_container container;
};

struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];
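
/*
 * Return the mask of CPUs sharing a core with the given cpu. Falls back
 * to cpu_possible_map when no topology information is available and to a
 * single-cpu mask when the cpu is not found in any core.
 */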
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!machine_has_topology)
		return cpu_possible_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}
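
/*
 * For each CPU bit set in a topology-list CPU entry, derive the physical
 * CPU address from the entry's origin and bit position, map it to the
 * corresponding logical CPU and record core membership and polarization.
 */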
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}
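
/*
 * Advance to the next topology-list entry: container entries and CPU
 * entries have different sizes, so step by the size matching the current
 * entry type.
 */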
static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}
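
/*
 * Walk the topology information block and rebuild the per-core CPU
 * masks; an unexpected entry discards the topology information and
 * clears machine_has_topology.
 */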
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}
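
/*
 * Without topology support every CPU is reported as horizontally
 * polarized.
 */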
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}
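
/*
 * Issue the PTF (perform topology function) instruction with the given
 * function code and return its condition code.
 */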
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}
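
/* Recompute cpu_core_map for every possible CPU. */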
static void update_cpu_core_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
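
/*
 * Re-read the topology information, rebuild the core map and send a
 * change uevent for each online CPU.
 */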
void arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}
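
/*
 * When no topology-change interrupt is available, changes are polled
 * every 60 seconds.
 */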
static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

static void topology_interrupt(__u16 code)
{
	schedule_work(&topology_work);
}
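
/*
 * With topology support, either register the topology-change external
 * interrupt (code 0x2005) or fall back to the polling timer; otherwise
 * just report simple horizontal polarization.
 */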
static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	if (machine_has_topology_irq) {
		rc = register_external_interrupt(0x2005, topology_interrupt);
		if (rc)
			goto out;
		ctl_set_bit(0, 8);
	} else
		set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);
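
/*
 * Early boot: detect the topology facility via the stfle facility bits,
 * allocate the topology information block and the per-core list from
 * bootmem and print the machine's topology nesting magnitudes.
 */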
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	if (facility_bits & (1ULL << 51))
		machine_has_topology_irq = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	printk(KERN_INFO "CPU topology:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
	machine_has_topology_irq = 0;
}