x86_64: add pseudo-features for 32-bit compat syscall
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9f530ff43c213ec3def93623fe36c11658721734..2c8afafa18e860ff98fafb243f2ba7964496c42d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -62,6 +62,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
        { 0x4b, LVL_3,      8192 },     /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3,     12288 },     /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3,     16384 },     /* 16-way set assoc, 64 byte line size */
+       { 0x4e, LVL_2,      6144 },     /* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
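
This hunk adds CPUID leaf-2 descriptor 0x4e, a 6 MB, 24-way set-associative
L2 with 64 byte lines. Each table entry maps a one-byte descriptor to a
cache level and a size in KB; init_intel_cacheinfo() matches every
descriptor byte returned by cpuid(2) against this table. Roughly, as a
simplified sketch of the lookup rather than the exact loop in this file:

	unsigned char des;	/* one descriptor byte from cpuid(2) */
	struct _cache_table *tbl;

	for (tbl = cache_table; tbl->descriptor != 0; tbl++)
		if (tbl->descriptor == des)
			break;	/* tbl->cache_type and tbl->size apply */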
@@ -129,7 +130,7 @@ struct _cpuid4_info {
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
-       cpumask_t shared_cpu_map;
+       cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
 };
 
 unsigned short                 num_cache_leaves;
@@ -352,8 +353,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2  call */
-               int i, j, n;
-               int regs[4];
+               int j, n;
+               unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;
 
@@ -368,7 +369,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
                        /* If bit 31 is set, this is an unknown format */
                        for ( j = 0 ; j < 3 ; j++ ) {
-                               if ( regs[j] < 0 ) regs[j] = 0;
+                               if (regs[j] & (1 << 31)) regs[j] = 0;
                        }
 
                        /* Byte 0 is level count, not a descriptor */
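
Two related fixes land here: regs[] becomes unsigned int (previous hunk),
and the "unknown format" test is rewritten to match. cpuid(2) sets bit 31
of a register to mark its contents as invalid; with a signed regs[], the
old "regs[j] < 0" comparison tested exactly that sign bit, but once the
array is unsigned the comparison is always false (and compilers warn). The
check now names the bit it is testing. A minimal illustration:

	unsigned int reg = 0x80000000;	/* bit 31 set: unknown format */

	/* (reg < 0) would now be always false; test the bit instead */
	if (reg & (1 << 31))
		reg = 0;		/* discard the whole register */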
@@ -451,8 +452,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 }
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static struct _cpuid4_info *cpuid4_info[NR_CPUS];
-#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)    (&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
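
This is the first of several conversions in the patch from NR_CPUS-sized
arrays to per-CPU variables. A static array is sized for the configuration
maximum (NR_CPUS may be 4096 on a 4-CPU box), whereas DEFINE_PER_CPU gives
one slot per possible CPU; per_cpu(var, cpu) replaces the var[cpu]
indexing, and the CPUID4_INFO_IDX() wrapper keeps all call sites unchanged.
The same treatment is applied to cache_kobject and index_kobject further
down. The pattern, condensed:

	static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);

	per_cpu(cpuid4_info, cpu) = kzalloc(...);  /* was cpuid4_info[cpu] */
	leaf = &per_cpu(cpuid4_info, cpu)[index];  /* CPUID4_INFO_IDX(cpu, index) */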
@@ -474,7 +475,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpu_set(i, this_leaf->shared_cpu_map);
-                               if (i != cpu && cpuid4_info[i])  {
+                               if (i != cpu && per_cpu(cpuid4_info, i))  {
                                        sibling_leaf = CPUID4_INFO_IDX(i, index);
                                        cpu_set(cpu, sibling_leaf->shared_cpu_map);
                                }
@@ -505,8 +506,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);
 
-       kfree(cpuid4_info[cpu]);
-       cpuid4_info[cpu] = NULL;
+       kfree(per_cpu(cpuid4_info, cpu));
+       per_cpu(cpuid4_info, cpu) = NULL;
 }
 
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
@@ -519,13 +520,13 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        if (num_cache_leaves == 0)
                return -ENOENT;
 
-       cpuid4_info[cpu] = kzalloc(
+       per_cpu(cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-       if (cpuid4_info[cpu] == NULL)
+       if (per_cpu(cpuid4_info, cpu) == NULL)
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;
 
@@ -542,12 +543,12 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
-       set_cpus_allowed(current, oldmask);
+       set_cpus_allowed_ptr(current, &oldmask);
 
 out:
        if (retval) {
-               kfree(cpuid4_info[cpu]);
-               cpuid4_info[cpu] = NULL;
+               kfree(per_cpu(cpuid4_info, cpu));
+               per_cpu(cpuid4_info, cpu) = NULL;
        }
 
        return retval;
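
detect_cache_attributes() must execute cpuid on the CPU being probed, so it
temporarily pins itself there: save current->cpus_allowed, restrict the
task to the target CPU, probe, then restore. The API change is that
set_cpus_allowed_ptr() takes a const pointer to the mask instead of the
mask by value; a cpumask_t grows with NR_CPUS (512 bytes at NR_CPUS=4096),
which is too large to keep passing on the stack. The pattern, condensed:

	cpumask_t oldmask = current->cpus_allowed;

	if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
		goto out;
	/* ... cpuid-based probing now runs on the target cpu ... */
	set_cpus_allowed_ptr(current, &oldmask);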
@@ -561,7 +562,7 @@ out:
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static struct kobject * cache_kobject[NR_CPUS];
+static DEFINE_PER_CPU(struct kobject *, cache_kobject);
 
 struct _index_kobject {
        struct kobject kobj;
@@ -570,8 +571,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static struct _index_kobject *index_kobject[NR_CPUS];
-#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)    (&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)                          \
 static ssize_t show_##file_name                                                \
@@ -591,11 +592,32 @@ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
        return sprintf (buf, "%luK\n", this_leaf->size / 1024);
 }
 
-static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+                                       int type, char *buf)
 {
-       char mask_str[NR_CPUS];
-       cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
-       return sprintf(buf, "%s\n", mask_str);
+       ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
+       int n = 0;
+
+       if (len > 1) {
+               cpumask_t *mask = &this_leaf->shared_cpu_map;
+
+               n = type?
+                       cpulist_scnprintf(buf, len-2, *mask):
+                       cpumask_scnprintf(buf, len-2, *mask);
+               buf[n++] = '\n';
+               buf[n] = '\0';
+       }
+       return n;
+}
+
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+{
+       return show_shared_cpu_map_func(leaf, 0, buf);
+}
+
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+{
+       return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
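
The rewritten show function avoids the old char mask_str[NR_CPUS] stack
buffer (up to 4 KB of stack with large NR_CPUS) by formatting straight into
the sysfs buffer. A sysfs show() buffer is a single page, and the
PTR_ALIGN expression computes how much of that page remains: rounding
buf + PAGE_SIZE - 1 up to the next page boundary and subtracting buf yields
PAGE_SIZE for a page-aligned buf, which sysfs provides. Two bytes are held
back so the trailing newline and NUL always fit:

	/* page-aligned buf, PAGE_SIZE == 4096 */
	len = PTR_ALIGN(buf + 4095, 4096) - buf;	/* == 4096 */
	n = cpumask_scnprintf(buf, len - 2, *mask);	/* room left... */
	buf[n++] = '\n';				/* ...for these */
	buf[n] = '\0';

The type argument merely selects between the two cpumask output formats;
the new shared_cpu_list attribute below is the list-formatted twin of
shared_cpu_map.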
@@ -633,6 +655,7 @@ define_one_ro(ways_of_associativity);
 define_one_ro(number_of_sets);
 define_one_ro(size);
 define_one_ro(shared_cpu_map);
+define_one_ro(shared_cpu_list);
 
 static struct attribute * default_attrs[] = {
        &type.attr,
@@ -643,6 +666,7 @@ static struct attribute * default_attrs[] = {
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
+       &shared_cpu_list.attr,
        NULL
 };
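
With the new attribute, each cpuX/cache/indexY directory exposes the
sharing information in both formats: shared_cpu_map as a hex bitmask and
shared_cpu_list as a human-readable range list. Illustrative output for a
cache shared by CPUs 0-1 (paths and values depend on the machine):

	# cat /sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_map
	00000003
	# cat /sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list
	0-1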
 
@@ -684,10 +708,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-       kfree(cache_kobject[cpu]);
-       kfree(index_kobject[cpu]);
-       cache_kobject[cpu] = NULL;
-       index_kobject[cpu] = NULL;
+       kfree(per_cpu(cache_kobject, cpu));
+       kfree(per_cpu(index_kobject, cpu));
+       per_cpu(cache_kobject, cpu) = NULL;
+       per_cpu(index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
 }
 
@@ -703,13 +727,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                return err;
 
        /* Allocate all required memory */
-       cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
-       if (unlikely(cache_kobject[cpu] == NULL))
+       per_cpu(cache_kobject, cpu) =
+               kzalloc(sizeof(struct kobject), GFP_KERNEL);
+       if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
                goto err_out;
 
-       index_kobject[cpu] = kzalloc(
+       per_cpu(index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
-       if (unlikely(index_kobject[cpu] == NULL))
+       if (unlikely(per_cpu(index_kobject, cpu) == NULL))
                goto err_out;
 
        return 0;
@@ -733,10 +758,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
        if (unlikely(retval < 0))
                return retval;
 
-       cache_kobject[cpu]->parent = &sys_dev->kobj;
-       kobject_set_name(cache_kobject[cpu], "%s", "cache");
-       cache_kobject[cpu]->ktype = &ktype_percpu_entry;
-       retval = kobject_register(cache_kobject[cpu]);
+       retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+                                     &ktype_percpu_entry,
+                                     &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
@@ -746,23 +770,24 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                this_object = INDEX_KOBJECT_PTR(cpu,i);
                this_object->cpu = cpu;
                this_object->index = i;
-               this_object->kobj.parent = cache_kobject[cpu];
-               kobject_set_name(&(this_object->kobj), "index%1lu", i);
-               this_object->kobj.ktype = &ktype_cache;
-               retval = kobject_register(&(this_object->kobj));
+               retval = kobject_init_and_add(&(this_object->kobj),
+                                             &ktype_cache,
+                                             per_cpu(cache_kobject, cpu),
+                                             "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
-                               kobject_unregister(
-                                       &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
+                               kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
                        }
-                       kobject_unregister(cache_kobject[cpu]);
+                       kobject_put(per_cpu(cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        break;
                }
+               kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        if (!retval)
                cpu_set(cpu, cache_dev_map);
 
+       kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return retval;
 }
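
kobject_register() and kobject_unregister() are gone in this kernel
generation; kobject_init_and_add() collapses the four-step open-coded setup
(parent, name, ktype, register) into one call. One behavioral difference
matters: kobject_init_and_add() deliberately does not announce the object
to userspace, so the patch now sends KOBJ_ADD uevents explicitly once each
object is fully set up. Side by side, condensed:

	/* old: registration also emitted the uevent */
	kobj->parent = parent;
	kobject_set_name(kobj, "%s", "cache");
	kobj->ktype = &ktype_percpu_entry;
	retval = kobject_register(kobj);

	/* new: the uevent is the caller's job, sent when ready */
	retval = kobject_init_and_add(kobj, &ktype_percpu_entry,
				      parent, "%s", "cache");
	kobject_uevent(kobj, KOBJ_ADD);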
 
@@ -771,15 +796,15 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
        unsigned int cpu = sys_dev->id;
        unsigned long i;
 
-       if (cpuid4_info[cpu] == NULL)
+       if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
        if (!cpu_isset(cpu, cache_dev_map))
                return;
        cpu_clear(cpu, cache_dev_map);
 
        for (i = 0; i < num_cache_leaves; i++)
-               kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-       kobject_unregister(cache_kobject[cpu]);
+               kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+       kobject_put(per_cpu(cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
 }