/* re-check, now that we've got the lock: */
        context = mm->context;
        if (context == 0) {
-               cpus_clear(mm->cpu_vm_mask);
+               cpumask_clear(mm_cpumask(mm));
                if (ia64_ctx.next >= ia64_ctx.limit) {
                        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                        ia64_ctx.max_ctx, ia64_ctx.next);
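For reference: throughout this series, direct uses of mm->cpu_vm_mask are
replaced by the mm_cpumask() accessor. A simplified sketch of the accessor,
assuming the definition the series adds to <linux/sched.h>:

	/* Future-safe accessor for the mm's cpu_vm_mask (sketch) */
	#define mm_cpumask(mm)	(&(mm)->cpu_vm_mask)

It yields a struct cpumask *, which is what the cpumask_*() operators take,
so the mask's representation can later change without touching callers.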
 
        do {
                context = get_mmu_context(mm);
-               if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-                       cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+               if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+                       cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                reload_context(context);
                /*
                 * in the unlikely event of a TLB-flush by another thread,
 
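The test-before-set pairing survives the conversion unchanged:
cpumask_set_cpu() is an atomic set_bit() on a mask shared between CPUs, so
the plain read via cpumask_test_cpu() skips the atomic write on the common
path where this CPU's bit is already set. Simplified sketches of the two
operators, roughly as in <linux/cpumask.h>:

	static inline void cpumask_set_cpu(int cpu, struct cpumask *dstp)
	{
		set_bit(cpu, cpumask_bits(dstp));
	}

	static inline int cpumask_test_cpu(int cpu, const struct cpumask *mask)
	{
		return test_bit(cpu, cpumask_bits(mask));
	}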
 
        preempt_disable();
 #ifdef CONFIG_SMP
-       if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+       if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
                platform_global_tlb_purge(mm, start, end, nbits);
                preempt_enable();
                return;
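cpumask_weight() is a popcount over the mask, so the condition reads: unless
this CPU is the only one ever to have run the mm, fall back to a global
purge. The same predicate, pulled out as a hypothetical helper purely for
illustration:

	/* Hypothetical helper, not part of the patch: the cheap local
	 * flush is only legal when the mm is active on this CPU and no
	 * other CPU has its bit set in the mm's cpumask. */
	static inline bool needs_global_purge(struct mm_struct *mm)
	{
		return mm != current->active_mm ||
		       cpumask_weight(mm_cpumask(mm)) != 1;
	}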
 
        unsigned long itc;
 
        itc = ia64_get_itc();
-       smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+       smp_flush_tlb_cpumask(*mm_cpumask(mm));
        itc = ia64_get_itc() - itc;
        __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
        __get_cpu_var(ptcstats).shub_ipi_flushes++;
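Note the dereference in the smp_flush_tlb_cpumask() call: the ia64 helper
still takes a cpumask_t by value, so the struct cpumask * returned by
mm_cpumask() must be dereferenced at the call site. The prototype, as it
stands on ia64 at this point in the series:

	/* still pass-by-value, hence the '*' at the call site above */
	extern void smp_flush_tlb_cpumask(cpumask_t xcpumask);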
        nodes_clear(nodes_flushed);
        i = 0;
 
-       for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+       for_each_cpu(cpu, mm_cpumask(mm)) {
                cnode = cpu_to_node(cpu);
                node_set(cnode, nodes_flushed);
                lcpu = cpu;
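for_each_cpu() takes the mask by pointer, replacing for_each_cpu_mask(),
which took it by value. A simplified sketch of the iterator's expansion,
roughly as in <linux/cpumask.h>:

	/* Simplified: walk the set bits of *mask in ascending order */
	#define for_each_cpu(cpu, mask)				\
		for ((cpu) = cpumask_next(-1, (mask));		\
		     (cpu) < nr_cpu_ids;			\
		     (cpu) = cpumask_next((cpu), (mask)))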