/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
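
/*
 * Illustrative sketch of the mapping described above: the sigp wrappers
 * look up the physical address of a logical cpu through
 * __cpu_logical_map, roughly
 *
 *	u16 phys_addr = __cpu_logical_map[cpu];
 *
 * whereas every other per-cpu array in this file is indexed with the
 * logical cpu number itself.
 */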

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
#include "entry.h"

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);

void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
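
/*
 * Sketch of the fast external call path built on smp_ext_bitcall():
 * the sender sets one ec_bit_sig bit in the target's lowcore and
 * raises an emergency-signal sigp; do_ext_call_interrupt() later
 * consumes all pending bits with a single xchg(), so several signals
 * may be coalesced into one interrupt.
 */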

void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
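
/*
 * The masks combine exactly as in smp_ctl_bit_callback() below; for
 * each control register i the new value is
 *
 *	cr[i] = (cr[i] & andvals[i]) | orvals[i];
 *
 * i.e. andvals selects bits to keep and orvals forces bits on.
 */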

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
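
/*
 * Usage sketch (cr/bit values here are arbitrary examples): to flip a
 * bit in control register 0 on every cpu and revert it again, one
 * would write
 *
 *	smp_ctl_set_bit(0, 17);
 *	smp_ctl_clear_bit(0, 17);
 *
 * Both calls broadcast the update with on_each_cpu() and so must not
 * be used from interrupt context.
 */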

/*
 * In early ipl state a temporary logical cpu number is needed, so the sigp
 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}

static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}

/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 *  don't care about the psw and regs settings since we'll never
	 *  reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode)
		return -EIO;

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
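
/*
 * Bring-up sequence in short (summary of the code above): sigp_set_prefix
 * points the new cpu at its freshly initialized lowcore, sigp_restart
 * makes it start executing, and __cpu_up() then spins until the cpu has
 * marked itself online in start_secondary().
 */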

static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
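
/*
 * Example (hypothetical command line): booting with "possible_cpus=2"
 * leaves only logical cpus 0 and 1 in cpu_possible_map, which caps the
 * number of cpus that can ever be brought online on this boot.
 */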

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;
	int lc_order;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info(&S390_lowcore.cpu_data);

	/* Reallocate current lowcore, but keep its contents. */
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#else
	BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
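
/*
 * Usage sketch (hypothetical shell session): an offline cpu can be
 * moved between the configured and standby states through this
 * attribute, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 *
 * The store handler rejects the request with -EBUSY while the cpu is
 * online.
 */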
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock(&idle->lock);
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	spin_unlock(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock(&idle->lock);
	now = get_clock();
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	spin_unlock(&idle->lock);
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
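
/*
 * Note on the ">> 12" above: idle_time is accumulated in TOD clock
 * units, where bit 51 equals one microsecond, so shifting right by 12
 * converts the value to the microseconds reported by idle_time_us.
 */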

static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
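
/*
 * Usage sketch (hypothetical shell session): any write to the class
 * attribute triggers a rescan for new cpus, e.g.
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 */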
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);
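
/*
 * Usage sketch (hypothetical shell session): switching between
 * horizontal (0) and vertical (1) cpu dispatching, e.g.
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 */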

static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);