[linux-2.6-omap-h63xx.git] / kernel / rcutorture.c
index b31065522104f1a324404ea721489909d9a845f7..3245b40952c61548d835d3f19c9d06cf4d1aa9d1 100644 (file)
@@ -868,49 +868,52 @@ static int rcu_idle_cpu;  /* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-       cpumask_t tmp_mask;
+       cpumask_var_t tmp_mask;
        int i;
 
-       cpus_setall(tmp_mask);
+       if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
+               BUG();
+
+       cpumask_setall(tmp_mask);
        get_online_cpus();
 
        /* No point in shuffling if there is only one online CPU (ex: UP) */
-       if (num_online_cpus() == 1) {
-               put_online_cpus();
-               return;
-       }
+       if (num_online_cpus() == 1)
+               goto out;
 
        if (rcu_idle_cpu != -1)
-               cpu_clear(rcu_idle_cpu, tmp_mask);
+               cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
 
-       set_cpus_allowed_ptr(current, &tmp_mask);
+       set_cpus_allowed_ptr(current, tmp_mask);
 
        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed_ptr(reader_tasks[i],
-                                                    &tmp_mask);
+                                                    tmp_mask);
        }
 
        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed_ptr(fakewriter_tasks[i],
-                                                    &tmp_mask);
+                                                    tmp_mask);
        }
 
        if (writer_task)
-               set_cpus_allowed_ptr(writer_task, &tmp_mask);
+               set_cpus_allowed_ptr(writer_task, tmp_mask);
 
        if (stats_task)
-               set_cpus_allowed_ptr(stats_task, &tmp_mask);
+               set_cpus_allowed_ptr(stats_task, tmp_mask);
 
        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;
 
+out:
        put_online_cpus();
+       free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
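For context, a minimal sketch (not part of the patch) of the cpumask_var_t pattern this hunk adopts: with CONFIG_CPUMASK_OFFSTACK=y the mask is heap-allocated instead of occupying NR_CPUS bits on the stack, so it must be explicitly allocated and freed. The function name below is hypothetical; the cpumask helpers are the real kernel APIs used in the hunk.

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static void example_offstack_mask_use(void)
	{
		cpumask_var_t mask;

		/* Heap allocation when CONFIG_CPUMASK_OFFSTACK=y;
		 * otherwise a no-op over an on-stack array. */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return;				/* allocation failed */

		cpumask_setall(mask);			/* start with all CPUs set */
		cpumask_clear_cpu(0, mask);		/* drop one CPU, e.g. CPU 0 */

		/* ... pass the mask around, e.g. to set_cpus_allowed_ptr() ... */

		free_cpumask_var(mask);			/* must pair with the alloc */
	}

Note that cpumask_var_t is already a pointer-like type under CONFIG_CPUMASK_OFFSTACK, which is why the patch drops the address-of operator in the set_cpus_allowed_ptr() calls.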