        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(new_mask, cpu);
        int retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
 
        /* Make sure we are running on right CPU */
        saved_mask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, new_mask);
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                return -1;
 
 
 static void drv_write(struct drv_cmd *cmd)
 {
        cpumask_t saved_mask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_declare(cpu_mask);
        unsigned int i;
 
        for_each_cpu_mask_nr(i, cmd->mask) {
-               cpumask_of_cpu_ptr_next(cpu_mask, i);
-               set_cpus_allowed_ptr(current, cpu_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                do_drv_write(cmd);
        }
 
        } aperf_cur, mperf_cur;
 
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        unsigned int perf_percent;
        unsigned int retval;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;
        }
 
        cached_freq = data->freq_table[data->acpi_data->state].frequency;
-       freq = extract_freq(get_cur_val(cpu_mask), data);
+       freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
 
 static int check_supported_cpu(unsigned int cpu)
 {
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        u32 eax, ebx, ecx, edx;
        unsigned int rc = 0;
 
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr_declare(newmask);
        int rc;
 
        if (!cpu_online(pol->cpu))
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
        set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
-               pol->cpus = *newmask;
+               pol->cpus = cpumask_of_cpu(pol->cpu);
        else
                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask = current->cpus_allowed;
-       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int khz = 0;
        unsigned int first;
 
        if (!data)
                return -EINVAL;
 
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX
                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
 
        unsigned l, h;
        unsigned clock_freq;
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(new_mask, cpu);
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu)
                return 0;
 
                 * Best effort undo..
                 */
 
-               if (!cpus_empty(*covered_cpus)) {
-                       cpumask_of_cpu_ptr_declare(new_mask);
-
+               if (!cpus_empty(*covered_cpus))
                        for_each_cpu_mask_nr(j, *covered_cpus) {
-                               cpumask_of_cpu_ptr_next(new_mask, j);
-                               set_cpus_allowed_ptr(current, new_mask);
+                               set_cpus_allowed_ptr(current,
+                                                    &cpumask_of_cpu(j));
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                        }
-               }
 
                tmp = freqs.new;
                freqs.new = freqs.old;
 
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       cpumask_of_cpu_ptr(newmask, cpu);
-       return _speedstep_get(newmask);
+       return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
 
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;
-       cpumask_of_cpu_ptr(newmask, cpu);
 
        if (num_cache_leaves == 0)
                return -ENOENT;
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, newmask);
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;
 
 
 
        if (reload) {
 #ifdef CONFIG_SMP
-               cpumask_of_cpu_ptr_declare(mask);
-
                preempt_disable();
                load_LDT(pc);
-               cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-               if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+               if (!cpus_equal(current->mm->cpu_vm_mask,
+                               cpumask_of_cpu(smp_processor_id())))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
 #else
 
        void *new_mc = NULL;
        int cpu;
        cpumask_t old;
-       cpumask_of_cpu_ptr_declare(newmask);
 
        old = current->cpus_allowed;
 
 
                        if (!uci->valid)
                                continue;
-                       cpumask_of_cpu_ptr_next(newmask, cpu);
-                       set_cpus_allowed_ptr(current, newmask);
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                        error = get_maching_microcode(new_mc, cpu);
                        if (error < 0)
                                goto out;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        cpumask_t old;
-       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int val[2];
        int err = 0;
 
                return 0;
 
        old = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        /* Check if the microcode we have in memory matches the CPU */
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
 static void microcode_init_cpu(int cpu, int resume)
 {
        cpumask_t old;
-       cpumask_of_cpu_ptr(newmask, cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
        old = current->cpus_allowed;
 
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        mutex_lock(&microcode_mutex);
        collect_cpu_info(cpu);
        if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
        if (end == buf)
                return -EINVAL;
        if (val == 1) {
-               cpumask_t old;
-               cpumask_of_cpu_ptr(newmask, cpu);
-
-               old = current->cpus_allowed;
+               cpumask_t old = current->cpus_allowed;
 
                get_online_cpus();
-               set_cpus_allowed_ptr(current, newmask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
                mutex_lock(&microcode_mutex);
                if (uci->valid)
 
 
        /* The boot cpu is always logical cpu 0 */
        int reboot_cpu_id = 0;
-       cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
        /* See if there has been given a command line override */
        if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-               cpu_online(reboot_cpu)) {
+               cpu_online(reboot_cpu))
                reboot_cpu_id = reboot_cpu;
-               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-       }
 #endif
 
        /* Make certain the cpu I'm about to reboot on is online */
-       if (!cpu_online(reboot_cpu_id)) {
+       if (!cpu_online(reboot_cpu_id))
                reboot_cpu_id = smp_processor_id();
-               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-       }
 
        /* Make certain I only run on the appropriate processor */
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr_declare(new_mask);
        int ret;
 
        if (!pr)
         * Migrate task to the cpu pointed by pr.
         */
        saved_mask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_next(new_mask, pr->id);
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, &saved_mask);
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr_declare(new_mask);
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
         * it can be called only for the cpu pointed by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-               cpumask_of_cpu_ptr_next(new_mask, pr->id);
-               set_cpus_allowed_ptr(current, new_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state);
        } else {
                                continue;
                        }
                        t_state.cpu = i;
-                       cpumask_of_cpu_ptr_next(new_mask, i);
-                       set_cpus_allowed_ptr(current, new_mask);
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state);
 
 static int smi_request(struct smi_cmd *smi_cmd)
 {
        cpumask_t old_mask;
-       cpumask_of_cpu_ptr(new_mask, 0);
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
 
        /* SMI requires CPU 0 */
        old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
 
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;
-       cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed_ptr(current, cpumask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
 
 {
        int irqs_disabled = 0;
        int prepared = 0;
-       cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
 
-       set_cpus_allowed_ptr(current, cpumask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
 
        /* Ack: we are alive */
        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
 
        struct tick_device *td;
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;
-       cpumask_of_cpu_ptr_declare(cpumask);
 
        spin_lock_irqsave(&tick_device_lock, flags);
 
        cpu = smp_processor_id();
-       cpumask_of_cpu_ptr_next(cpumask, cpu);
        if (!cpu_isset(cpu, newdev->cpumask))
                goto out_bc;
 
        curdev = td->evtdev;
 
        /* cpu local device ? */
-       if (!cpus_equal(newdev->cpumask, *cpumask)) {
+       if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 
                /*
                 * If the cpu affinity of the device interrupt can not
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
-               if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+               if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
                        goto out_bc;
        }
 
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
-       tick_setup_device(td, newdev, cpu, cpumask);
+       tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
 
 
        int cpu;
 
        for_each_online_cpu(cpu) {
-               cpumask_of_cpu_ptr(new_mask, cpu);
-
-               set_cpus_allowed_ptr(current, new_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                start_stack_timer(cpu);
        }
        set_cpus_allowed_ptr(current, &saved_mask);
 
 {
        unsigned long preempt_count = preempt_count();
        int this_cpu = raw_smp_processor_id();
-       cpumask_of_cpu_ptr_declare(this_mask);
 
        if (likely(preempt_count))
                goto out;
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
-       cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-
-       if (cpus_equal(current->cpus_allowed, *this_mask))
+       if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
                goto out;
 
        /*
 
        switch (m->mode) {
        case SVC_POOL_PERCPU:
        {
-               cpumask_of_cpu_ptr(cpumask, node);
-               set_cpus_allowed_ptr(task, cpumask);
+               set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
                break;
        }
        case SVC_POOL_PERNODE:
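
For reference, a minimal sketch (a hypothetical helper, not part of this patch) of the affinity pattern every call site above goes back to: save current->cpus_allowed, pin the task with &cpumask_of_cpu(cpu), verify the migration landed on the requested CPU, do the per-CPU work, then restore the saved mask. do_per_cpu_work() is a placeholder for the MSR/CPUID access the real callers perform.

/*
 * Minimal sketch, not from the patch: the save/pin/work/restore pattern
 * shared by the converted call sites.  do_per_cpu_work() is a placeholder.
 */
static int example_run_on_cpu(unsigned int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int ret;

	/* cpumask_of_cpu(cpu) is an lvalue, so its address can be taken */
	ret = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (ret)
		return ret;

	if (smp_processor_id() != cpu) {
		/* could not migrate to the requested CPU */
		set_cpus_allowed_ptr(current, &saved_mask);
		return -EAGAIN;
	}

	do_per_cpu_work(cpu);	/* placeholder for the per-CPU MSR/CPUID work */

	set_cpus_allowed_ptr(current, &saved_mask);
	return 0;
}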