}
  
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
 -              cpufreq_cpu_data[j] = policy;
 +              per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  
  err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus)
+       for_each_cpu_mask_nr(j, policy->cpus)
 -              cpufreq_cpu_data[j] = NULL;
 +              per_cpu(cpufreq_cpu_data, j) = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
  
        kobject_put(&policy->kobj);
  
        /* if we have other CPUs still registered, we need to unlink them,
         * or else wait_for_completion below will lock up. Clean the
 -       * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
 -       * links afterwards.
 +       * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
 +       * the sysfs links afterwards.
         */
        if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask(j, data->cpus) {
+               for_each_cpu_mask_nr(j, data->cpus) {
                        if (j == cpu)
                                continue;
 -                      cpufreq_cpu_data[j] = NULL;
 +                      per_cpu(cpufreq_cpu_data, j) = NULL;
                }
        }
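
For reference, the pattern adopted on both sides above is the kernel's per-CPU variable API (DEFINE_PER_CPU()/per_cpu()) in place of an array indexed by CPU number, plus the for_each_cpu_mask_nr() iterator, which walks cpu ids only up to nr_cpu_ids instead of all NR_CPUS slots. A minimal sketch of that pattern follows; example_data and example_set_policy() are made-up names for illustration only, not the real cpufreq symbols:

/* Sketch only: example_data and example_set_policy() are illustrative
 * names.  DEFINE_PER_CPU(), per_cpu() and for_each_cpu_mask_nr() are
 * the interfaces used in the hunks above.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>

static DEFINE_PER_CPU(struct cpufreq_policy *, example_data);

static void example_set_policy(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* iterate only the possible cpu ids, not all NR_CPUS slots */
        for_each_cpu_mask_nr(j, policy->cpus)
                per_cpu(example_data, j) = policy;  /* was example_data[j] */
}

The per-CPU form keeps each CPU's pointer in that CPU's per-cpu area rather than in a static NR_CPUS-sized array, which is why the array indexing in the hunks above becomes per_cpu(cpufreq_cpu_data, j).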