#ifdef CONFIG_HOTPLUG_CPU
        /* cpufreq holds the hotplug lock, so we are safe from here on */
-       cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+       cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
 #else
-       online_policy_cpus = policy->cpus;
+       cpumask_copy(&online_policy_cpus, policy->cpus);
 #endif
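
The hunk above is the basic flavor of the whole conversion: the old cpus_* macros took cpumask_t operands by value, while the new cpumask_* functions work purely through struct cpumask pointers, and the cpu_online_map variable gives way to the const pointer cpu_online_mask. Note the #else branch also needs cpumask_copy() now that policy->cpus is a pointer type. A minimal sketch of the new style (the helper name is hypothetical):

	#include <linux/cpumask.h>

	/* Count how many of the requested CPUs are currently online. */
	static unsigned int online_weight(const struct cpumask *requested)
	{
		cpumask_t online;	/* on-stack, as in the code above */

		cpumask_and(&online, cpu_online_mask, requested);
		return cpumask_weight(&online);
	}
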
        /*
         * Will let policy->cpus know about dependency only when software
         * coordination is required.
         */
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-               cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+               cpumask_copy(policy->cpus, perf->shared_cpu_map);
        }
-       cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+       cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
-       if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+       if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-               policy->cpus = per_cpu(cpu_core_map, cpu);
+               cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
 #endif
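
This hunk also shows the workaround pattern for BIOSes that misreport shared frequency domains: if the policy mask ends up with a single CPU, fall back to the package topology. A sketch under the same assumptions (the example_* name is hypothetical; cpu_core_mask() is the x86 topology helper already used above):

	#include <linux/cpumask.h>

	/* Widen a suspiciously small policy mask to the whole package. */
	static void example_apply_sw_any_quirk(struct cpumask *affected,
					       unsigned int cpu)
	{
		if (cpumask_weight(affected) == 1)
			cpumask_copy(affected, cpu_core_mask(cpu));
	}
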
 
 
                return 0;
 
        /* notifiers */
-       for_each_cpu_mask_nr(i, policy->cpus) {
+       for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
        /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
         * Developer's Manual, Volume 3
         */
-       for_each_cpu_mask_nr(i, policy->cpus)
+       for_each_cpu(i, policy->cpus)
                cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
        /* notifiers */
-       for_each_cpu_mask_nr(i, policy->cpus) {
+       for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
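
for_each_cpu_mask_nr() iterated over a mask passed by value; for_each_cpu() takes a const struct cpumask * and is otherwise a drop-in replacement, as the three loops above show. A self-contained sketch (hypothetical helper):

	#include <linux/cpumask.h>

	/* Visit every CPU set in a mask; here we just sum the CPU ids. */
	static unsigned int example_sum_cpu_ids(const struct cpumask *mask)
	{
		unsigned int cpu, sum = 0;

		for_each_cpu(cpu, mask)
			sum += cpu;
		return sum;
	}
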
        unsigned int i;
 
 #ifdef CONFIG_SMP
-       policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+       cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
        /* Errata workaround */
 
        set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
-               pol->cpus = cpumask_of_cpu(pol->cpu);
+               cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
        else
-               pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-       data->available_cores = &(pol->cpus);
+               cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+       data->available_cores = pol->cpus;
 
        /* Take a crude guess here.
         * That guess was in microseconds, so multiply with 1000 */
 
        /* we need to keep track of associated cores, but let cpufreq
         * handle hotplug events - so just point at cpufreq pol->cpus
         * structure */
-       cpumask_t *available_cores;
+       struct cpumask *available_cores;
 };
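
As the comment notes, the driver deliberately keeps a pointer into pol->cpus instead of a private copy, so cpufreq's hotplug handling updates the driver's view for free. A sketch of that aliasing (the example_* names are hypothetical):

	#include <linux/cpumask.h>

	struct example_drv_data {
		struct cpumask *available_cores;	/* aliases pol->cpus */
	};

	/* Bind the driver data to the policy mask: a pointer copy, not a
	 * cpumask_copy(), so later changes to the policy stay visible. */
	static void example_bind(struct example_drv_data *data,
				 struct cpumask *policy_cpus)
	{
		data->available_cores = policy_cpus;
	}
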
 
 
 
        }
 
        first_cpu = 1;
-       for_each_cpu_mask_nr(j, policy->cpus) {
-               const cpumask_t *mask;
+       for_each_cpu(j, policy->cpus) {
+               const struct cpumask *mask;
 
                /* cpufreq holds the hotplug lock, so we are safe here */
                if (!cpu_online(j))
                 * Make sure we are running on CPU that wants to change freq
                 */
                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                       mask = &policy->cpus;
+                       mask = policy->cpus;
                else
-                       mask = &cpumask_of_cpu(j);
+                       mask = cpumask_of(j);
 
                set_cpus_allowed_ptr(current, mask);
                preempt_disable();
                        dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                                target_freq, freqs.old, freqs.new, msr);
 
-                       for_each_cpu_mask_nr(k, policy->cpus) {
+                       for_each_cpu(k, policy->cpus) {
                                if (!cpu_online(k))
                                        continue;
                                freqs.cpu = k;
                preempt_enable();
        }
 
-       for_each_cpu_mask_nr(k, policy->cpus) {
+       for_each_cpu(k, policy->cpus) {
                if (!cpu_online(k))
                        continue;
                freqs.cpu = k;
                tmp = freqs.new;
                freqs.new = freqs.old;
                freqs.old = tmp;
-               for_each_cpu_mask_nr(j, policy->cpus) {
+               for_each_cpu(j, policy->cpus) {
                        if (!cpu_online(j))
                                continue;
                        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 
        return 0;
 }
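
Two conversions meet in the loop above: policy->cpus is already a pointer, and cpumask_of(j) replaces &cpumask_of_cpu(j) by yielding a const struct cpumask * directly. Either way set_cpus_allowed_ptr() receives a pointer. A sketch (hypothetical helper):

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	/* Migrate the current task onto a single CPU before touching
	 * per-CPU MSRs, as the target routines above do. */
	static int example_pin_to_cpu(unsigned int cpu)
	{
		return set_cpus_allowed_ptr(current, cpumask_of(cpu));
	}
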
 
-static unsigned int _speedstep_get(const cpumask_t *cpus)
+static unsigned int _speedstep_get(const struct cpumask *cpus)
 {
        unsigned int speed;
        cpumask_t cpus_allowed;
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       return _speedstep_get(&cpumask_of_cpu(cpu));
+       return _speedstep_get(cpumask_of(cpu));
 }
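
Helpers that used to take cpumask_t pointers now take const struct cpumask *, which lets one function serve both a single CPU (via cpumask_of()) and a whole policy mask, exactly as _speedstep_get() is called above. A sketch in the same style (hypothetical helper):

	#include <linux/types.h>
	#include <linux/cpumask.h>

	/* Return true only if every CPU in the mask is online. */
	static bool example_all_online(const struct cpumask *cpus)
	{
		unsigned int cpu;

		for_each_cpu(cpu, cpus)
			if (!cpu_online(cpu))
				return false;
		return true;
	}
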
 
        if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
                return -EINVAL;
 
-       freqs.old = _speedstep_get(&policy->cpus);
+       freqs.old = _speedstep_get(policy->cpus);
        freqs.new = speedstep_freqs[newstate].frequency;
        freqs.cpu = policy->cpu;
 
 
        cpus_allowed = current->cpus_allowed;
 
-       for_each_cpu_mask_nr(i, policy->cpus) {
+       for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
 
        /* switch to physical CPU where state is to be changed */
-       set_cpus_allowed_ptr(current, &policy->cpus);
+       set_cpus_allowed_ptr(current, policy->cpus);
 
        speedstep_set_state(newstate);
 
        /* allow to be run on all CPUs */
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
-       for_each_cpu_mask_nr(i, policy->cpus) {
+       for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
 
        /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-       policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+       cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
        cpus_allowed = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &policy->cpus);
+       set_cpus_allowed_ptr(current, policy->cpus);
 
        /* detect low and high frequency and transition latency */
        result = speedstep_get_freqs(speedstep_processor,
                return result;
 
        /* get current speed setting */
-       speed = _speedstep_get(&policy->cpus);
+       speed = _speedstep_get(policy->cpus);
        if (!speed)
                return -EIO;
 
 
        return i;
 }
 
-static ssize_t show_cpus(cpumask_t mask, char *buf)
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
 {
        ssize_t i = 0;
        unsigned int cpu;
 
-       for_each_cpu_mask_nr(cpu, mask) {
+       for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
 /**
  * show_related_cpus - show the CPUs affected by each transition even if
  * hw coordination is in use
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-       if (cpus_empty(policy->related_cpus))
+       if (cpumask_empty(policy->related_cpus))
                return show_cpus(policy->cpus, buf);
        return show_cpus(policy->related_cpus, buf);
 }
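
show_cpus() builds the space-separated decimal list by hand; kernels of this vintage also provide cpumask_scnprintf() and cpulist_scnprintf() for the hex-bitmap and range ("0-3,5") formats. A sketch of the latter (hypothetical helper; assumes a sysfs buffer of PAGE_SIZE):

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <asm/page.h>

	/* Emit the mask as a cpulist, e.g. "0-3,5", newline-terminated. */
	static ssize_t example_show_cpulist(const struct cpumask *mask,
					    char *buf)
	{
		ssize_t len = cpulist_scnprintf(buf, PAGE_SIZE - 2, mask);

		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
		return len;
	}
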
                ret = -ENOMEM;
                goto nomem_out;
        }
+       if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+               kfree(policy);
+               ret = -ENOMEM;
+               goto nomem_out;
+       }
+       if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+               free_cpumask_var(policy->cpus);
+               kfree(policy);
+               ret = -ENOMEM;
+               goto nomem_out;
+       }
+       /* alloc_cpumask_var() does not zero the mask, and
+        * show_related_cpus() tests related_cpus with cpumask_empty()
+        * before any driver has filled it in, so clear it here. */
+       cpumask_clear(policy->related_cpus);
 
        policy->cpu = cpu;
-       policy->cpus = cpumask_of_cpu(cpu);
+       cpumask_copy(policy->cpus, cpumask_of(cpu));
 
        /* Initially set CPU itself as the policy_cpu */
        per_cpu(policy_cpu, cpu) = cpu;
        }
 #endif
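
With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a real heap allocation, so the two masks above need the explicit alloc_cpumask_var()/free_cpumask_var() pairing; with it off, alloc_cpumask_var() always succeeds and costs nothing. A condensed sketch of the init pattern (the example_* names are hypothetical):

	#include <linux/slab.h>
	#include <linux/cpumask.h>

	struct example_policy {
		cpumask_var_t cpus;
	};

	static struct example_policy *example_policy_alloc(unsigned int cpu)
	{
		struct example_policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return NULL;
		if (!alloc_cpumask_var(&p->cpus, GFP_KERNEL)) {
			kfree(p);	/* unwind in reverse order */
			return NULL;
		}
		cpumask_copy(p->cpus, cpumask_of(cpu));	/* one CPU to start */
		return p;
	}
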
 
-       for_each_cpu_mask_nr(j, policy->cpus) {
+       for_each_cpu(j, policy->cpus) {
                if (cpu == j)
                        continue;
 
                                goto err_out_driver_exit;
 
                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                       managed_policy->cpus = policy->cpus;
+                       cpumask_copy(managed_policy->cpus, policy->cpus);
                        per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        }
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask_nr(j, policy->cpus) {
+       for_each_cpu(j, policy->cpus) {
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        /* symlink affected CPUs */
-       for_each_cpu_mask_nr(j, policy->cpus) {
+       for_each_cpu(j, policy->cpus) {
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
 
 err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask_nr(j, policy->cpus)
+       for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        /* if this isn't the CPU which is the parent of the kobj, we
         * only need to unlink, put and exit
         */
        if (unlikely(cpu != data->cpu)) {
                dprintk("removing link\n");
-               cpu_clear(cpu, data->cpus);
+               cpumask_clear_cpu(cpu, data->cpus);
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                sysfs_remove_link(&sys_dev->kobj, "cpufreq");
                cpufreq_cpu_put(data);
         * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
         * the sysfs links afterwards.
         */
-       if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask_nr(j, data->cpus) {
+       if (unlikely(cpumask_weight(data->cpus) > 1)) {
+               for_each_cpu(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        per_cpu(cpufreq_cpu_data, j) = NULL;
 
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask_nr(j, data->cpus) {
+       if (unlikely(cpumask_weight(data->cpus) > 1)) {
+               for_each_cpu(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        dprintk("removing link for cpu %u\n", j);
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(data);
 
+       free_cpumask_var(data->related_cpus);
+       free_cpumask_var(data->cpus);
        kfree(data);
+       per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
        cpufreq_debug_enable_ratelimit();
        return 0;
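
The teardown above is the mirror image: free the masks before the structure that owns them. free_cpumask_var() compiles away when CONFIG_CPUMASK_OFFSTACK is off, so it is always safe to call unconditionally. Continuing the allocation sketch (hypothetical names):

	static void example_policy_free(struct example_policy *p)
	{
		if (!p)
			return;
		free_cpumask_var(p->cpus);	/* masks first... */
		kfree(p);			/* ...then the owner */
	}
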
 
                        return rc;
                }
 
-               for_each_cpu_mask_nr(j, policy->cpus) {
+               for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
 
        /* Get Absolute Load - in terms of freq */
        max_load_freq = 0;
 
-       for_each_cpu_mask_nr(j, policy->cpus) {
+       for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                cputime64_t cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                        return rc;
                }
 
-               for_each_cpu_mask_nr(j, policy->cpus) {
+               for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
 
 };
 
 struct cpufreq_policy {
-       cpumask_t               cpus;   /* CPUs requiring sw coordination */
-       cpumask_t               related_cpus; /* CPUs with any coordination */
+       cpumask_var_t           cpus;   /* CPUs requiring sw coordination */
+       cpumask_var_t           related_cpus; /* CPUs with any coordination */
        unsigned int            shared_type; /* ANY or ALL affected CPUs
                                                should set cpufreq */
        unsigned int            cpu;    /* cpu nr of registered CPU */
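
The type change above is the point of the whole series. cpumask_var_t is a pointer when CONFIG_CPUMASK_OFFSTACK=y and an embedded one-element array otherwise, which is why every user switched from &policy->cpus to plain policy->cpus: both spellings of the type decay to a struct cpumask *. Paraphrased from <linux/cpumask.h> of this era:

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;		/* heap-allocated */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* embedded; decays to a pointer */
	#endif
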