pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/sparc/kernel/smp_64.c
sparc64: Fix MM refcount check in smp_flush_tlb_pending().
[linux-2.6-omap-h63xx.git] / arch / sparc / kernel / smp_64.c
index 46329799f3462bb4002024558be74f204e679f07..79457f682b5ac05b693d185223ddd7ea8f9335b6 100644 (file)
@@ -449,7 +449,7 @@ again:
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
-               printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+               printk("CPU[%d]: mondo stuckage result[%016llx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
@@ -584,7 +584,7 @@ retry:
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
-                       printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+                       printk("CPU[%d]: mondo stuckage result[%016llx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
  *    If the address space is non-shared (ie. mm->count == 1) we avoid
  *    cross calls when we want to flush the currently running process's
  *    tlb state.  This is done by clearing all cpu bits except the current
- *    processor's in current->active_mm->cpu_vm_mask and performing the
+ *    processor's in current->mm->cpu_vm_mask and performing the
  *    flush locally only.  This will force any subsequent cpus which run
  *    this task to flush the context from the local tlb if the process
  *    migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();
 
-       if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
+       if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,