 static inline int elf_core_copy_task_regs(struct task_struct *t,
                                          elf_gregset_t* elfregs)
 {
-       struct pt_regs *pp = ia64_task_regs(t);
-       ELF_CORE_COPY_REGS((*elfregs), pp);
+       ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
        return 1;
 }
 
 
         */
        fp_tos = (fsr>>11)&0x7;
        fr8_st_map = (8-fp_tos)&0x7;
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
        ia64f2ia32f(fpregp, &ptp->f8);
        copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
        fr8_st_map = (8-fp_tos)&0x7;
        fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
        ia32f2ia64f(&ptp->f8, fpregp);
        copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
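
For context on the arithmetic above: bits 11-13 of the x87 status word (fsr)
hold TOP, the rotating top-of-stack index, and the ia32 emulation keeps the
eight physical x87 registers in ia64 f8-f15. A short illustrative note,
assuming that layout:

	/*
	 * st(i) names physical register (TOP + i) & 7, so physical register 0
	 * (held in ia64 f8) appears to the user as st((8 - TOP) & 7) -- the
	 * fr8_st_map value computed above. E.g. TOP = 5 gives fr8_st_map = 3:
	 * f8's contents belong in the st(3) save slot.
	 */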
 
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-       struct pt_regs *regs = ia64_task_regs(task);
+       struct pt_regs *regs = task_pt_regs(task);
 
        /* Setup the segment descriptors */
        regs->r24 = load_desc(regs->r16 >> 16);         /* ESD */
 ia32_load_state (struct task_struct *t)
 {
        unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-       struct pt_regs *regs = ia64_task_regs(t);
+       struct pt_regs *regs = task_pt_regs(t);
 
        eflag = t->thread.eflag;
        fsr = t->thread.fsr;
 
 {
        struct pt_regs *child_regs;
 
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: return child_regs->r11;
              case PT_ECX: return child_regs->r9;
 {
        struct pt_regs *child_regs;
 
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: child_regs->r11 = value; break;
              case PT_ECX: child_regs->r9 = value; break;
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
        *  Stack frames start with 16-bytes of temp space
        */
       swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
       tos = (tsk->thread.fsr >> 11) & 7;
       for (i = 0; i < 8; i++)
               put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
               get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
 
 pfm_syswide_force_stop(void *info)
 {
        pfm_context_t   *ctx = (pfm_context_t *)info;
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        struct task_struct *owner;
        unsigned long flags;
        int ret;
        is_system = ctx->ctx_fl_system;
 
        task = PFM_CTX_TASK(ctx);
-       regs = ia64_task_regs(task);
+       regs = task_pt_regs(task);
 
        DPRINT(("ctx_state=%d is_current=%d\n",
                state,
        is_system = ctx->ctx_fl_system;
 
        task = PFM_CTX_TASK(ctx);
-       regs = ia64_task_regs(task);
+       regs = task_pt_regs(task);
 
        DPRINT(("ctx_state=%d is_current=%d\n",
                state,
                 */
                ia64_psr(regs)->up = 0;
        } else {
-               tregs = ia64_task_regs(task);
+               tregs = task_pt_regs(task);
 
                /*
                 * stop monitoring at the user level
                ia64_psr(regs)->up = 1;
 
        } else {
-               tregs = ia64_task_regs(ctx->ctx_task);
+               tregs = task_pt_regs(ctx->ctx_task);
 
                /*
                 * start monitoring at the kernel level the next
                /*
                 * when not current, task MUST be stopped, so this is safe
                 */
-               regs = ia64_task_regs(task);
+               regs = task_pt_regs(task);
 
                /* force a full reload */
                ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
        /*
         * per-task mode
         */
-       tregs = task == current ? regs : ia64_task_regs(task);
+       tregs = task == current ? regs : task_pt_regs(task);
 
        if (task == current) {
                /*
 {
        pfm_context_t *ctx;
        unsigned long flags;
-       struct pt_regs *regs = ia64_task_regs(task);
+       struct pt_regs *regs = task_pt_regs(task);
        int ret, state;
        int free_ok = 0;
 
        if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-       ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+       ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
        call_made = 1;
 
 
        pfm_clear_task_notify();
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        /*
         * extract reason for being here and clear
         * on every CPU, so we can rely on the pid to identify the idle task.
         */
        if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-               regs = ia64_task_regs(task);
+               regs = task_pt_regs(task);
                ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
                return;
        }
        flags = pfm_protect_ctx_ctxsw(ctx);
 
        if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-               struct pt_regs *regs = ia64_task_regs(task);
+               struct pt_regs *regs = task_pt_regs(task);
 
                pfm_clear_psr_up();
 
        BUG_ON(psr & IA64_PSR_I);
 
        if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-               struct pt_regs *regs = ia64_task_regs(task);
+               struct pt_regs *regs = task_pt_regs(task);
 
                BUG_ON(ctx->ctx_smpl_hdr);
 
 {
        struct pt_regs *regs;
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        DPRINT(("called\n"));
 
 {
        struct pt_regs *regs;
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        DPRINT(("called\n"));
 
        local_irq_save(flags);
 
        this_cpu = smp_processor_id();
-       regs     = ia64_task_regs(current);
+       regs     = task_pt_regs(current);
        info     = PFM_CPUINFO_GET();
        dcr      = ia64_getreg(_IA64_REG_CR_DCR);
 
 
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_save_state(task);
 #endif
 }
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_load_state(task);
 #endif
 }
         * If we're cloning an IA32 task then save the IA32 extra
         * state from the current task to the new task
         */
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_save_state(p);
                if (clone_flags & CLONE_SETTLS)
                        retval = ia32_clone_tls(p, child_ptregs);
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                /* A kernel thread is always a 64-bit process. */
                current->thread.map_base  = DEFAULT_MAP_BASE;
                current->thread.task_size = DEFAULT_TASK_SIZE;
        current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
        ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_drop_partial_page_list(current);
                current->thread.task_size = IA32_PAGE_OFFSET;
                set_fs(USER_DS);
        if (current->thread.flags & IA64_THREAD_DBG_VALID)
                pfm_release_debug_registers(current);
 #endif
-       if (IS_IA32_PROCESS(ia64_task_regs(current)))
+       if (IS_IA32_PROCESS(task_pt_regs(current)))
                ia32_drop_partial_page_list(current);
 }
 
 
        long num_regs, nbits;
        struct pt_regs *pt;
 
-       pt = ia64_task_regs(task);
+       pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
 
        struct pt_regs *pt;
        unsigned long cfm, *urbs_kargs;
 
-       pt = ia64_task_regs(task);
+       pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
 
 
        urbs_end = (long *) user_rbs_end;
        laddr = (unsigned long *) addr;
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
        struct pt_regs *child_regs;
 
        laddr = (unsigned long *) addr;
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
                 */
                return 0;
 
-       thread_regs = ia64_task_regs(thread);
+       thread_regs = task_pt_regs(thread);
        thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
        if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
                return 0;
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-       struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+       struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
        /*
         * Prevent migrating this task while
 void
 ia64_sync_fph (struct task_struct *task)
 {
-       struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+       struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
        ia64_flush_fph(task);
        if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
                                          + offsetof(struct pt_regs, reg)))
 
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
 
        if ((addr & 0x7) != 0) {
        if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
        if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
 void
 ptrace_disable (struct task_struct *child)
 {
-       struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+       struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
        /* make sure the single step/taken-branch trap bits are not set: */
        child_psr->ss = 0;
        if (ret < 0)
                goto out_tsk;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
 
        switch (request) {
 
 #endif
 
        /* Clear the stack memory reserved for pt_regs: */
-       memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+       memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
        ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
 
 asmlinkage long
 sys_pipe (void)
 {
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        int fd[2];
        int retval;
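
Why sys_pipe() needs the caller's pt_regs at all: the ia64 pipe() ABI hands
both descriptors back in registers. A hedged sketch of the tail of the
function (not part of this hunk):

	/* fd[0] is returned as the normal syscall return value (r8);
	 * fd[1] travels back in r9 via the saved register frame. */
	regs->r9 = fd[1];
	retval = fd[0];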
 
 
 #ifdef CONFIG_X86_64
 #  define COMPAT_TEST is_compat_task()
 #elif defined(CONFIG_IA64)
-#  define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
+#  define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
 #elif defined(CONFIG_S390)
 #  define COMPAT_TEST test_thread_flag(TIF_31BIT)
 #elif defined(CONFIG_MIPS)
 
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
 }
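
Here r12 is the ia64 stack pointer, so the helper carves len bytes of scratch
space just below the 32-bit user stack: truncate the stack pointer to 32 bits,
round down to a 16-byte boundary, then back off by len. A worked example with
illustrative values:

	/*
	 * regs->r12 = 0xffffe9b4, len = 64:
	 *   0xffffe9b4 & 0xffffffff = 0xffffe9b4   (truncate to 32 bits)
	 *   0xffffe9b4 & -16        = 0xffffe9b0   (align down to 16 bytes)
	 *   0xffffe9b0 - 64         = 0xffffe970   (start of scratch area)
	 */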
 
 
 /* Return instruction pointer of blocked task TSK.  */
 #define KSTK_EIP(tsk)                                  \
   ({                                                   \
-       struct pt_regs *_regs = ia64_task_regs(tsk);    \
+       struct pt_regs *_regs = task_pt_regs(tsk);      \
        _regs->cr_iip + ia64_psr(_regs)->ri;            \
   })
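
On ia64 the instruction pointer is really a (bundle, slot) pair: cr_iip holds
the 16-byte-aligned bundle address and psr.ri the slot number (0-2) within it,
so summing them yields a distinct value per instruction slot. For example:

	/* cr_iip = 0x40000000000005d0, ri = 2  =>  KSTK_EIP() = 0x40000000000005d2 */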
 
 
 })
 
   /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t)               (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)                        ((struct ia64_psr *) &(regs)->cr_ipsr)
 # define user_mode(regs)               (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
 # define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
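
The rename is purely mechanical; task_pt_regs() performs the same pointer
arithmetic as before. Since user_stack() above shows that the pt_regs save
area sits at the very top of the task's IA64_STK_OFFSET-byte stack region, an
equivalent inline form (a sketch only, the kernel keeps it as a macro) would
be:

	static inline struct pt_regs *
	task_pt_regs_sketch (struct task_struct *t)
	{
		/* step to the top of the stack area, then back off by one pt_regs */
		return ((struct pt_regs *) ((char *) t + IA64_STK_OFFSET)) - 1;
	}
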
    *
    * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
    */
-# define force_successful_syscall_return()     (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return()     (task_pt_regs(current)->r8 = 0)
 
   struct task_struct;                  /* forward decl */
   struct unw_frame_info;               /* forward decl */
 
 
 #define IA64_HAS_EXTRA_STATE(t)                                                        \
        ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
-        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+        || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {                                                        \
        if (IA64_HAS_EXTRA_STATE(prev))                                                          \
                ia64_save_extra(prev);                                                           \
        if (IA64_HAS_EXTRA_STATE(next))                                                          \
                ia64_load_extra(next);                                                           \
-       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                    \
+       ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                      \
        (last) = ia64_switch_to((next));                                                         \
 } while (0)
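
The dfh line above is what arms the lazy fph reload: psr.dfh ("disabled
floating-point high") is set whenever the incoming task does not own the FPU
on this CPU, so its first touch of f32-f127 raises a Disabled FP-Register
fault and the kernel can restore the high partition on demand. Illustratively:

	/*
	 * dfh = 1  =>  touching f32-f127 faults, thread.fph is reloaded lazily;
	 * dfh = 0  =>  this CPU already holds the task's fph state.
	 */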
 
  * the latest fph state from another CPU.  In other words: eager save, lazy restore.
  */
 # define switch_to(prev,next,last) do {                                                \
-       if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {                             \
-               ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+       if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {                               \
+               ia64_psr(task_pt_regs(prev))->mfh = 0;                  \
                (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
                __ia64_save_fpu((prev)->thread.fph);                            \
        }                                                                       \