if (smp_processor_id() != boot_cpuid)
                        smp_call_function_single(boot_cpuid,
                                                 __marvel_access_rtc,
-                                                &rtc_access, 1, 1);
+                                                &rtc_access, 1);
                else
                        __marvel_access_rtc(&rtc_access);
 #else
 
                }
        }
 
-       if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+       if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }
 
        data.mm = mm;
        data.addr = addr;
 
-       if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+       if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }
 
                }
        }
 
-       if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+       if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }
 
 
        model->reg_setup(&reg, ctr, &sys);

        /* Configure the registers on all cpus.  */
-       (void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+       (void)smp_call_function(model->cpu_setup, &reg, 1);
        model->cpu_setup(&reg);
        return 0;
 }
 static int
 op_axp_start(void)
 {
-       (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+       (void)smp_call_function(op_axp_cpu_start, NULL, 1);
        op_axp_cpu_start(NULL);
        return 0;
 }
 static void
 op_axp_stop(void)
 {
-       (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+       (void)smp_call_function(op_axp_cpu_stop, NULL, 1);
        op_axp_cpu_stop(NULL);
 }
 
 
        data.ret = 0;
 
        preempt_disable();
-       smp_call_function(em_func, &data, 1, 1);
+       smp_call_function(em_func, &data, 1);
        em_func(&data);
        preempt_enable();
 
 
        else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
-               smp_call_function(vfp_enable, NULL, 1, 1);
+               smp_call_function(vfp_enable, NULL, 1);
 
                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
                printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
 
 /* Other calls */
 void smp_send_stop(void)
 {
-       smp_call_function(stop_this_cpu, NULL, 1, 0);
+       smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-                     int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
        cpumask_t cpu_mask = CPU_MASK_ALL;
        struct call_data_struct data;
 
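The calling-context rule in the comment above is worth a concrete illustration. A minimal sketch, with demo names of our own (nothing below is from the patch itself):

#include <linux/smp.h>

static void demo(void *unused)
{
}

static void demo_good_context(void)
{
	/* Allowed: process context, interrupts enabled. */
	smp_call_function(demo, NULL, 1);
}

/* Not allowed: with interrupts disabled, or from irq/bottom-half context,
 * two CPUs cross-calling each other with wait == 1 would each spin for an
 * IPI handler the other can no longer service -- a deadlock. */
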
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-                                        NULL, 1, 0);
+                                        NULL, 0);
                break;
        }
        return NOTIFY_OK;
 
 
 
        /* will send IPI to other CPU and wait for completion of remote call */
-       if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+       if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
                printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
                       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
                return 0;
 
        int ret;
 
        DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
        DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
 
 {
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
-       smp_call_function(do_nothing, NULL, 0, 1);
+       smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 
 
        go[MASTER] = 1;
 
-       if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+       if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
                printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
                return;
        }
 
        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
                atomic_set(&uc_pool->status, 0);
-               status = smp_call_function(uncached_ipi_visibility, uc_pool,
-                                          0, 1);
+               status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
                if (status || atomic_read(&uc_pool->status))
                        goto failed;
        } else if (status != PAL_VISIBILITY_OK)
        if (status != PAL_STATUS_SUCCESS)
                goto failed;
        atomic_set(&uc_pool->status, 0);
-       status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+       status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
        if (status || atomic_read(&uc_pool->status))
                goto failed;
 
 
                if (use_ipi) {
                        /* use an interprocessor interrupt to call SAL */
                        smp_call_function_single(cpu, sn_hwperf_call_sal,
-                               op_info, 1, 1);
+                               op_info, 1);
                }
                else {
                        /* migrate the task before calling SAL */ 
 
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
-       smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+       smp_call_function(flush_tlb_all_ipi, NULL, 1);
        preempt_enable();
 }
 
  *==========================================================================*/
 void smp_send_stop(void)
 {
-       smp_call_function(stop_this_cpu, NULL, 1, 0);
+       smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /*==========================================================================*
 
 
 void smp_send_stop(void)
 {
-       smp_call_function(stop_this_cpu, NULL, 1, 0);
+       smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
-       smp_call_function(func, info, 1, 1);
+       smp_call_function(func, info, 1);
 #endif
 }
 
 
  *    primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
        preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-       smp_call_function(func, info, retry, wait);
+       smp_call_function(func, info, wait);
 #endif
        func(info);
        preempt_enable();
 
 static void r4k___flush_cache_all(void)
 {
-       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
        int exec = vma->vm_flags & VM_EXEC;
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
        if (!cpu_has_dc_aliases)
                return;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
        args.addr = addr;
        args.pfn = pfn;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-                               1, 1);
+                               1);
 }
 
 struct flush_icache_range_args {
        args.start = start;
        args.end = end;
 
-       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
        instruction_hazard();
 }
 
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
 
 #ifdef CONFIG_SMP
        if (smp_processor_id())
                /* CPU 1 */
-               smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+               smp_call_function(prom_cpu0_exit, NULL, 1);
 #endif
        prom_cpu0_exit(NULL);
 }
 
                if (!reboot_smp) {
                        /* Get CPU 0 to do the cfe_exit */
                        reboot_smp = 1;
-                       smp_call_function(cfe_linux_exit, arg, 1, 0);
+                       smp_call_function(cfe_linux_exit, arg, 0);
                }
        } else {
                printk("Passing control back to CFE...\n");
 
 {
 #ifdef CONFIG_SMP
        if (smp_processor_id()) {
-               smp_call_function(prom_cpu0_exit, NULL, 1, 1);
+               smp_call_function(prom_cpu0_exit, NULL, 1);
        }
 #endif
        while(1);
 
 
 void smp_send_stop(void)
 {
-       smp_call_function(stop_this_cpu, NULL, 0, 0);
+       smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;
 
                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
                        smp_call_function_single(i, add_virt_timer_periodic,
                                                 &per_cpu(appldata_timer, i),
-                                                0, 1);
+                                                1);
                }
                appldata_timer_active = 1;
                P_INFO("Monitoring timer started.\n");
                        args.timer = &per_cpu(appldata_timer, i);
                        args.expires = per_cpu_interval;
                        smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-                                                &args, 0, 1);
+                                                &args, 1);
                }
        }
 }
 
 }
 
 static void __smp_call_function_map(void (*func) (void *info), void *info,
-                                   int nonatomic, int wait, cpumask_t map)
+                                   int wait, cpumask_t map)
 {
        struct call_data_struct data;
        int cpu, local = 0;
  * smp_call_function:
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on all other CPUs.
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-                     int wait)
+int smp_call_function(void (*func) (void *info), void *info, int wait)
 {
        cpumask_t map;
 
        spin_lock(&call_lock);
        map = cpu_online_map;
        cpu_clear(smp_processor_id(), map);
-       __smp_call_function_map(func, info, nonatomic, wait, map);
+       __smp_call_function_map(func, info, wait, map);
        spin_unlock(&call_lock);
        return 0;
 }
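
Note the map construction above: the calling CPU is cleared from the map, so smp_call_function() only ever reaches *other* CPUs. A minimal sketch of the broadcast-then-local pattern this patch touches at many call sites (demo names are illustrative):

#include <linux/smp.h>

static void demo_fn(void *info)
{
	/* fast, non-blocking per-CPU work */
}

static void demo_run_everywhere(void *info)
{
	preempt_disable();			/* keep smp_processor_id() stable */
	smp_call_function(demo_fn, info, 1);	/* every other online CPU */
	demo_fn(info);				/* cover the local CPU ourselves */
	preempt_enable();
}
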
  * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
- * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
  *
  * Run a function on one processor.
  * hardware interrupt handler or from a bottom half.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                            int nonatomic, int wait)
+                            int wait)
 {
        spin_lock(&call_lock);
-       __smp_call_function_map(func, info, nonatomic, wait,
-                               cpumask_of_cpu(cpu));
+       __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
        spin_unlock(&call_lock);
        return 0;
 }
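
A sketch of a caller on the new four-argument smp_call_function_single(), showing why wait == 1 matters when the argument lives on the caller's stack (struct demo_query and demo_read are our names, not the kernel's):

#include <linux/smp.h>

struct demo_query {
	unsigned long val;
};

static void demo_read(void *info)
{
	struct demo_query *q = info;

	q->val = smp_processor_id();	/* illustrative payload */
}

static unsigned long demo_query_cpu(int cpu)
{
	struct demo_query q = { 0 };

	/* wait == 1: q is on our stack, so do not return until the
	 * remote CPU has finished writing it. */
	smp_call_function_single(cpu, demo_read, &q, 1);
	return q.val;
}
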
 {
        spin_lock(&call_lock);
        cpu_clear(smp_processor_id(), mask);
-       __smp_call_function_map(func, info, 0, wait, mask);
+       __smp_call_function_map(func, info, wait, mask);
        spin_unlock(&call_lock);
        return 0;
 }
 
         */
        memset(&etr_sync, 0, sizeof(etr_sync));
        preempt_disable();
-       smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
+       smp_call_function(etr_sync_cpu_start, NULL, 0);
        local_irq_disable();
        etr_enable_sync_clock();
 
                rc = -EAGAIN;
        }
        local_irq_enable();
-       smp_call_function(etr_sync_cpu_end,NULL,0,0);
+       smp_call_function(etr_sync_cpu_end,NULL,0);
        preempt_enable();
        return rc;
 }
 
 
 void smp_send_stop(void)
 {
-       smp_call_function(stop_this_cpu, 0, 1, 0);
+       smp_call_function(stop_this_cpu, 0, 0);
 }
 
 void arch_send_call_function_ipi(cpumask_t mask)
        preempt_disable();
 
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-               smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+               smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
-               smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+               smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
 
                fd.vma = vma;
                fd.addr1 = page;
-               smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+               smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
        fd.addr1 = asid;
        fd.addr2 = vaddr;
 
-       smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+       smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
 }
 
  * smp_call_function(): Run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code. Does not return until
  * hardware interrupt handler or from a bottom half handler.
  */
 static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-                                         int nonatomic, int wait,
-                                         cpumask_t mask)
+                                         int wait, cpumask_t mask)
 {
        struct call_data_struct data;
        int cpus;
        return 0;
 }
 
-int smp_call_function(void (*func)(void *info), void *info,
-                     int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
-       return sparc64_smp_call_function_mask(func, info, nonatomic, wait,
-                                               cpu_online_map);
+       return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
 }
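
For subsets of CPUs there is also the mask-based form, declared with the same trailing wait flag in the header hunk later in this patch; smp_tsb_sync() just below is a real caller of the sparc64 helper. A hedged sketch against the generic declaration (demo_fn is illustrative, and we clear ourselves from the mask to stay independent of whether a given implementation services the local CPU):

#include <linux/smp.h>

static void demo_fn(void *info)
{
}

static void demo_call_subset(cpumask_t mask)
{
	cpu_clear(smp_processor_id(), mask);	/* exclude ourselves */
	smp_call_function_mask(mask, demo_fn, NULL, 1);
}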
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-       sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+       sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
 
        atomic_inc(&scf_finished);
 }
 
-int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
-                     int wait)
+int smp_call_function(void (*_func)(void *info), void *_info, int wait)
 {
        int cpus = num_online_cpus() - 1;
        int i;
 
        atomic_set(&data.gate,0);
 
        /*  Start the ball rolling on other CPUs  */
-       if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+       if (smp_call_function(ipi_handler, &data, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");
 
        local_irq_save(flags);
  */
 void mtrr_save_state(void)
 {
-       smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+       smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
 static int __init mtrr_init_finialize(void)
 
        for (; count; count -= 16) {
                cmd.eax = pos;
                cmd.ecx = pos >> 32;
-               smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
+               smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
                if (copy_to_user(tmp, &cmd, 16))
                        return -EFAULT;
                tmp += 16;
 
                load_LDT(pc);
                mask = cpumask_of_cpu(smp_processor_id());
                if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-                       smp_call_function(flush_ldt, NULL, 1, 1);
+                       smp_call_function(flush_ldt, NULL, 1);
                preempt_enable();
 #else
                load_LDT(pc);
 
 
 #ifdef CONFIG_SMP
        if (nmi_watchdog == NMI_LOCAL_APIC)
-               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
        for_each_possible_cpu(cpu)
 
 
 #ifdef CONFIG_SMP
        if (nmi_watchdog == NMI_LOCAL_APIC)
-               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
        for (cpu = 0; cpu < NR_CPUS; cpu++)
 
        if (reboot_force)
                return;
 
-       smp_call_function(stop_this_cpu, NULL, 0, 0);
+       smp_call_function(stop_this_cpu, NULL, 0);
        local_irq_save(flags);
        disable_local_APIC();
        local_irq_restore(flags);
 
 {
        long cpu = (long)arg;
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-               smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+               smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
        return NOTIFY_DONE;
 }
 
 
 {
        if (vmx->vcpu.cpu == -1)
                return;
-       smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1);
+       smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
        vmx->launched = 0;
 }
 
 
         * So need not to call smp_call_function_single() in that case.
         */
        if (vcpu->guest_mode && vcpu->cpu != cpu)
-               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
        put_cpu();
 }
 
 
        rv.msr_no = msr_no;
        if (safe) {
-               smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+               smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
                err = rv.err;
        } else {
-               smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+               smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
        }
        *l = rv.l;
        *h = rv.h;
        rv.l = l;
        rv.h = h;
        if (safe) {
-               smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+               smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
                err = rv.err;
        } else {
-               smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+               smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
        }
 
        return err;
 
 /* broadcast a halt to all other CPUs */
 static void voyager_smp_send_stop(void)
 {
-       smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
+       smp_call_function(smp_stop_cpu_function, NULL, 1);
 }
 
 /* this function is triggered in time.c when a clock tick fires
 
 
 void xen_smp_send_stop(void)
 {
-       smp_call_function(stop_self, NULL, 0, 0);
+       smp_call_function(stop_self, NULL, 0);
 }
 
 void xen_smp_send_reschedule(int cpu)
 
 static int acpi_processor_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
 {
-       smp_call_function(smp_callback, NULL, 0, 1);
+       smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
 }
 
 
 static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
 {
-       smp_call_function(smp_callback, NULL, 0, 1);
+       smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
 }
 
 
 #else /* CONFIG_SMP */
 
 #define hard_smp_processor_id()                0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu)    ({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu)    ({ 0; })
 
 #endif /* CONFIG_SMP */
 
 
                           unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
        xc1((smpfunc_t)func, (unsigned long)info);
        return 0;
 
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int wait);
 int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                                int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int retry, int wait);
+                               int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*
 {
        return 0;
 }
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
                        (up_smp_call_function(func, info))
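
On uniprocessor builds the macro keeps the new three-argument shape but, per the stub above, reduces to up_smp_call_function(), which returns 0 without running anything: there are no other CPUs. A sketch (demo names are ours):

#include <linux/smp.h>

static void demo(void *unused)
{
}

static int demo_up_call(void)
{
	int err = smp_call_function(demo, NULL, 1);	/* no-op on UP, returns 0 */

	demo(NULL);	/* local work must still be done explicitly */
	return err;
}
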
 #define on_each_cpu(func,info,retry,wait)      \
        ({                                      \
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
 ({ \
        WARN_ON(cpuid != 0);    \
        local_irq_disable();    \
 
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @retry: Unused
  * @wait: If true, wait until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code. Note that @wait
  * we fall back to on-stack allocation.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                            int retry, int wait)
+                            int wait)
 {
        struct call_single_data d;
        unsigned long flags;
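
Unlike the arch versions above that unconditionally return 0, the generic version returns a status code callers are expected to check, as the palinfo hunk earlier does. A minimal sketch (demo names are illustrative):

#include <linux/kernel.h>
#include <linux/smp.h>

static void demo_fn(void *info)
{
}

static int demo_single_checked(int cpu)
{
	int err = smp_call_function_single(cpu, demo_fn, NULL, 1);

	if (err)
		printk(KERN_ERR "demo: cross-call to CPU%d failed: %d\n",
		       cpu, err);
	return err;
}
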
  * smp_call_function(): Run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
  * @info: An arbitrary pointer to pass to the function.
- * @natomic: Unused
  * @wait: If true, wait (atomically) until function has completed on other CPUs.
  *
  * Returns 0 on success, else a negative status code.
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+int smp_call_function(void (*func)(void *), void *info, int wait)
 {
        int ret;
 
 
        int ret = 0;
 
        preempt_disable();
-       ret = smp_call_function(func, info, retry, wait);
+       ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
 
                       "offline CPU #%d\n", *oncpu);
        else
                smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-                                        &reason, 1, 1);
+                                        &reason, 1);
 }
 
 /*
 
        init_completion(&info.completion);
 
        local_bh_disable();
-       smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+       smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();
 
 
                if (cpu_isset(cpu, iucv_buffer_cpumask) &&
                    !cpu_isset(cpu, iucv_irq_cpumask))
                        smp_call_function_single(cpu, iucv_allow_cpu,
-                                                NULL, 0, 1);
+                                                NULL, 1);
        preempt_enable();
 }
 
        cpumask = iucv_irq_cpumask;
        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
        for_each_cpu_mask(cpu, cpumask)
-               smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+               smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
        rc = -EIO;
        preempt_disable();
        for_each_online_cpu(cpu)
-               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
        preempt_enable();
        if (cpus_empty(iucv_buffer_cpumask))
                /* No cpu could declare an iucv buffer. */
        case CPU_ONLINE_FROZEN:
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
-               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+               smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                if (cpus_empty(cpumask))
                        /* Can't offline last IUCV enabled cpu. */
                        return NOTIFY_BAD;
-               smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+               smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
                if (cpus_empty(iucv_irq_cpumask))
                        smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-                                                iucv_allow_cpu, NULL, 0, 1);
+                                                iucv_allow_cpu, NULL, 1);
                break;
        }
        return NOTIFY_OK;
         * pending interrupts force them to the work queue by calling
         * an empty function on all cpus.
         */
-       smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+       smp_call_function(__iucv_cleanup_queue, NULL, 1);
        spin_lock_irq(&iucv_queue_lock);
        list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
                /* Remove stale work items from the task queue. */
 
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
+               smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
+               smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
-                               &r, 0, 1);
+                               &r, 1);
                if (r < 0)
                        goto out_free_1;
        }