pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/s390/kernel/topology.c
Merge branch 'linus' into tmp.x86.mpparse.new
[linux-2.6-omap-h63xx.git] / arch / s390 / kernel / topology.c
index 369dc1c3bd10a7b1a0019933bb64e46ce1cf7273..661a07217057a37b8d08b7ad85c8d4a2508a2bd0 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  arch/s390/kernel/topology.c
- *
  *    Copyright IBM Corp. 2007
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
@@ -11,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/bootmem.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <asm/sysinfo.h>
 
 #define CPU_BITS 64
+#define NR_MAG 6
+
+#define PTF_HORIZONTAL (0UL)
+#define PTF_VERTICAL   (1UL)
+#define PTF_CHECK      (2UL)
 
 struct tl_cpu {
-       unsigned char reserved[6];
+       unsigned char reserved0[4];
+       unsigned char :6;
+       unsigned char pp:2;
+       unsigned char reserved1;
        unsigned short origin;
        unsigned long mask[CPU_BITS / BITS_PER_LONG];
 };
@@ -36,8 +43,6 @@ union tl_entry {
        struct tl_container container;
 };
 
-#define NR_MAG 6
-
 struct tl_info {
        unsigned char reserved0[2];
        unsigned short length;
@@ -62,6 +67,8 @@ static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 
+cpumask_t cpu_core_map[NR_CPUS];
+
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
        struct core_info *core = &core_info;
@@ -96,8 +103,10 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
                rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
                for_each_present_cpu(lcpu) {
-                       if (__cpu_logical_map[lcpu] == rcpu)
+                       if (__cpu_logical_map[lcpu] == rcpu) {
                                cpu_set(lcpu, core->mask);
+                               smp_cpu_polarization[lcpu] = tl_cpu->pp;
+                       }
                }
        }
 }
@@ -127,7 +136,7 @@ static void tl_to_cores(struct tl_info *info)
 
        mutex_lock(&smp_cpu_state_mutex);
        clear_cores();
-       tle = (union tl_entry *)&info->tle;
+       tle = info->tle;
        end = (union tl_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
@@ -152,7 +161,17 @@ static void tl_to_cores(struct tl_info *info)
        mutex_unlock(&smp_cpu_state_mutex);
 }
 
-static int ptf(void)
+static void topology_update_polarization_simple(void)
+{
+       int cpu;
+
+       mutex_lock(&smp_cpu_state_mutex);
+       for_each_present_cpu(cpu)
+               smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+       mutex_unlock(&smp_cpu_state_mutex);
+}
+
+static int ptf(unsigned long fc)
 {
        int rc;
 
@@ -161,36 +180,81 @@ static int ptf(void)
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
-               : "d" (2UL)  : "cc");
+               : "d" (fc)  : "cc");
+       return rc;
+}
+
+int topology_set_cpu_management(int fc)
+{
+       int cpu;
+       int rc;
+
+       if (!machine_has_topology)
+               return -EOPNOTSUPP;
+       if (fc)
+               rc = ptf(PTF_VERTICAL);
+       else
+               rc = ptf(PTF_HORIZONTAL);
+       if (rc)
+               return -EBUSY;
+       for_each_present_cpu(cpu)
+               smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
        return rc;
 }
 
+static void update_cpu_core_map(void)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu)
+               cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+}
+
 void arch_update_cpu_topology(void)
 {
        struct tl_info *info = tl_info;
        struct sys_device *sysdev;
        int cpu;
 
-       if (!machine_has_topology)
+       if (!machine_has_topology) {
+               update_cpu_core_map();
+               topology_update_polarization_simple();
                return;
-       ptf();
+       }
        stsi(info, 15, 1, 2);
        tl_to_cores(info);
+       update_cpu_core_map();
        for_each_online_cpu(cpu) {
                sysdev = get_cpu_sysdev(cpu);
                kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
        }
 }
 
-static void topology_work_fn(struct work_struct *work)
+static int topology_kthread(void *data)
 {
        arch_reinit_sched_domains();
+       return 0;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+       /* We can't call arch_reinit_sched_domains() from a multi-threaded
+        * workqueue context since it may deadlock in case of cpu hotplug.
+        * So we have to create a kernel thread in order to call
+        * arch_reinit_sched_domains().
+        */
+       kthread_run(topology_kthread, NULL, "topology_update");
+}
+
+void topology_schedule_update(void)
+{
+       schedule_work(&topology_work);
 }
 
 static void topology_timer_fn(unsigned long ignored)
 {
-       if (ptf())
-               schedule_work(&topology_work);
+       if (ptf(PTF_CHECK))
+               topology_schedule_update();
        set_topology_timer();
 }
 
@@ -211,18 +275,23 @@ static int __init init_topology_update(void)
 {
        int rc;
 
-       if (!machine_has_topology)
-               return 0;
-       init_timer(&topology_timer);
+       rc = 0;
+       if (!machine_has_topology) {
+               topology_update_polarization_simple();
+               goto out;
+       }
+       init_timer_deferrable(&topology_timer);
        if (machine_has_topology_irq) {
                rc = register_external_interrupt(0x2005, topology_interrupt);
                if (rc)
-                       return rc;
+                       goto out;
                ctl_set_bit(0, 8);
        }
        else
                set_topology_timer();
-       return 0;
+out:
+       update_cpu_core_map();
+       return rc;
 }
 __initcall(init_topology_update);