arch/ia64/kernel/ptrace.c
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12  - IA64 Native Utrace implementation support added by
 *      Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *      be, up, ac, mfl, mfh (the user mask; five bits total)
 *      db (debug breakpoint fault; one bit)
 *      id (instruction debug fault disable; one bit)
 *      dd (data debug fault disable; one bit)
 *      ri (restart instruction; two bits)
 *      is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS      \
                   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)     ((1UL << (nbits)) - 1)  /* mask with NBITS bits set */
#define PFM_MASK        MASK(38)

#define PTRACE_DEBUG    0

#if PTRACE_DEBUG
# define dprintk(format...)     printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
        return (long) pt->cr_ifs >= 0;
}
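
/*
 * This works because bit 63 of cr.ifs is the "valid" bit: it is set
 * in the frame saved on an interruption but not on the syscall entry
 * path (cf. convert_to_non_syscall(), which sets it explicitly), so
 * a non-negative signed value means the kernel was entered via a
 * system call.
 */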

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#       define GET_BITS(first, last, unat)                              \
        ({                                                              \
                unsigned long bit = ia64_unat_pos(&pt->r##first);       \
                unsigned long nbits = (last - first + 1);               \
                unsigned long mask = MASK(nbits) << first;              \
                unsigned long dist;                                     \
                if (bit < first)                                        \
                        dist = 64 + bit - first;                        \
                else                                                    \
                        dist = bit - first;                             \
                ia64_rotr(unat, dist) & mask;                           \
        })
        unsigned long val;

        /*
         * Registers that are stored consecutively in struct pt_regs
         * can be handled in parallel.  If the register order in
         * struct pt_regs changes, this code MUST be updated.
         */
        val  = GET_BITS( 1,  1, scratch_unat);
        val |= GET_BITS( 2,  3, scratch_unat);
        val |= GET_BITS(12, 13, scratch_unat);
        val |= GET_BITS(14, 14, scratch_unat);
        val |= GET_BITS(15, 15, scratch_unat);
        val |= GET_BITS( 8, 11, scratch_unat);
        val |= GET_BITS(16, 31, scratch_unat);
        return val;

#       undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#       define PUT_BITS(first, last, nat)                               \
        ({                                                              \
                unsigned long bit = ia64_unat_pos(&pt->r##first);       \
                unsigned long nbits = (last - first + 1);               \
                unsigned long mask = MASK(nbits) << first;              \
                long dist;                                              \
                if (bit < first)                                        \
                        dist = 64 + bit - first;                        \
                else                                                    \
                        dist = bit - first;                             \
                ia64_rotl(nat & mask, dist);                            \
        })
        unsigned long scratch_unat;

        /*
         * Registers that are stored consecutively in struct pt_regs
         * can be handled in parallel.  If the register order in
         * struct pt_regs changes, this code MUST be updated.
         */
        scratch_unat  = PUT_BITS( 1,  1, nat);
        scratch_unat |= PUT_BITS( 2,  3, nat);
        scratch_unat |= PUT_BITS(12, 13, nat);
        scratch_unat |= PUT_BITS(14, 14, nat);
        scratch_unat |= PUT_BITS(15, 15, nat);
        scratch_unat |= PUT_BITS( 8, 11, nat);
        scratch_unat |= PUT_BITS(16, 31, nat);

        return scratch_unat;

#       undef PUT_BITS
}

#define IA64_MLX_TEMPLATE       0x2
#define IA64_MOVL_OPCODE        6

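/*
 * The two helpers below step cr.iip by one instruction slot.  IA-64
 * packs up to three instruction slots into each 16-byte bundle;
 * psr.ri (0..2) selects the slot within the bundle at cr.iip.  In an
 * MLX bundle, slots 1 and 2 together hold a single long instruction
 * (e.g. movl), so slot 2 by itself is never a valid restart point.
 */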
void
ia64_increment_ip (struct pt_regs *regs)
{
        unsigned long w0, ri = ia64_psr(regs)->ri + 1;

        if (ri > 2) {
                ri = 0;
                regs->cr_iip += 16;
        } else if (ri == 2) {
                get_user(w0, (char __user *) regs->cr_iip + 0);
                if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
                        /*
                         * rfi'ing to slot 2 of an MLX bundle causes
                         * an illegal operation fault.  We don't want
                         * that to happen...
                         */
                        ri = 0;
                        regs->cr_iip += 16;
                }
        }
        ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
        unsigned long w0, ri = ia64_psr(regs)->ri - 1;

        if (ia64_psr(regs)->ri == 0) {
                regs->cr_iip -= 16;
                ri = 2;
                get_user(w0, (char __user *) regs->cr_iip + 0);
                if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
                        /*
                         * rfi'ing to slot 2 of an MLX bundle causes
                         * an illegal operation fault.  We don't want
                         * that to happen...
                         */
                        ri = 1;
                }
        }
        ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignments of the
 * user and kernel backing stores differ, this is not completely
 * trivial.  In essence, we need to construct the user RNAT value
 * based on up to two kernel RNAT values and/or the RNAT value saved
 * in the child's pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /                         kernel rbs
 * +--------+                           +--------+
 *          <- child_regs->ar_bspstore  | slot61 | <-- krbs
 * +- - - - +                           +--------+
 *                                      | slot62 |
 * +- - - - +                           +--------+
 *                                      |  rnat  |
 * +- - - - +                           +--------+
 *   vrnat                              | slot00 |
 * +- - - - +                           +--------+
 *                                      =        =
 *                                      +--------+
 *                                      | slot00 | \
 *                                      +--------+ |
 *                                      | slot01 | > child_stack->ar_rnat
 *                                      +--------+ |
 *                                      | slot02 | /
 *                                      +--------+
 *                                                <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
          unsigned long *krbs, unsigned long *urnat_addr,
          unsigned long *urbs_end)
{
        unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
        unsigned long umask = 0, mask, m;
        unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
        long num_regs, nbits;
        struct pt_regs *pt;

        pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;

        if (urbs_end < urnat_addr)
                nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
        else
                nbits = 63;
        mask = MASK(nbits);
        /*
         * First, figure out which bit number slot 0 in user-land maps
         * to in the kernel rnat.  Do this by figuring out how many
         * register slots we're beyond the user's backingstore and
         * then computing the equivalent address in kernel space.
         */
        num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
        slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
        shift = ia64_rse_slot_num(slot0_kaddr);
        rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
        rnat0_kaddr = rnat1_kaddr - 64;

        if (ubspstore + 63 > urnat_addr) {
                /* some bits need to be merged in from pt->ar_rnat */
                umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
                urnat = (pt->ar_rnat & umask);
                mask &= ~umask;
                if (!mask)
                        return urnat;
        }

        m = mask << shift;
        if (rnat0_kaddr >= kbsp)
                rnat0 = sw->ar_rnat;
        else if (rnat0_kaddr > krbs)
                rnat0 = *rnat0_kaddr;
        urnat |= (rnat0 & m) >> shift;

        m = mask >> (63 - shift);
        if (rnat1_kaddr >= kbsp)
                rnat1 = sw->ar_rnat;
        else if (rnat1_kaddr > krbs)
                rnat1 = *rnat1_kaddr;
        urnat |= (rnat1 & m) << (63 - shift);
        return urnat;
}
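
/*
 * For reference: the RSE places one RNaT collection word after every
 * 63 register slots, at backing-store addresses whose bits 3..8 are
 * all ones ((addr & 0x1f8) == 0x1f8); ia64_rse_rnat_addr() rounds up
 * to the next such slot.  User and kernel RNaT words therefore line
 * up only when the two backing stores are congruent modulo 0x200,
 * which is why get_rnat()/put_rnat() may have to combine two kernel
 * RNaT words.
 */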

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
          unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
          unsigned long *urbs_end)
{
        unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
        unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
        long num_regs, nbits;
        struct pt_regs *pt;
        unsigned long cfm, *urbs_kargs;

        pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;

        urbs_kargs = urbs_end;
        if (in_syscall(pt)) {
                /*
                 * If entered via syscall, don't allow user to set rnat bits
                 * for syscall args.
                 */
                cfm = pt->cr_ifs;
                urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
        }

        if (urbs_kargs >= urnat_addr)
                nbits = 63;
        else {
                if ((urnat_addr - 63) >= urbs_kargs)
                        return;
                nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
        }
        mask = MASK(nbits);

        /*
         * First, figure out which bit number slot 0 in user-land maps
         * to in the kernel rnat.  Do this by figuring out how many
         * register slots we're beyond the user's backingstore and
         * then computing the equivalent address in kernel space.
         */
        num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
        slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
        shift = ia64_rse_slot_num(slot0_kaddr);
        rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
        rnat0_kaddr = rnat1_kaddr - 64;

        if (ubspstore + 63 > urnat_addr) {
                /* some bits need to be placed in pt->ar_rnat: */
                umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
                pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
                mask &= ~umask;
                if (!mask)
                        return;
        }
        /*
         * Note: Section 11.1 of the EAS guarantees that bit 63 of an
         * rnat slot is ignored, so we don't have to clear it here.
         */
        rnat0 = (urnat << shift);
        m = mask << shift;
        if (rnat0_kaddr >= kbsp)
                sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
        else if (rnat0_kaddr > krbs)
                *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

        rnat1 = (urnat >> (63 - shift));
        m = mask >> (63 - shift);
        if (rnat1_kaddr >= kbsp)
                sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
        else if (rnat1_kaddr > krbs)
                *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

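/*
 * Return non-zero if ADDR lies within the part of the user backing
 * store, [bspstore..urbs_end] plus its final RNaT collection slot,
 * whose contents currently live on the kernel backing store.
 */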
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
               unsigned long urbs_end)
{
        unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
                                                      urbs_end);
        return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long *val)
{
        unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
        struct pt_regs *child_regs;
        size_t copied;
        long ret;

        urbs_end = (long *) user_rbs_end;
        laddr = (unsigned long *) addr;
        child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
                          (unsigned long) urbs_end))
        {
                /*
                 * Attempt to read the RBS in an area that's actually
                 * on the kernel RBS => read the corresponding bits in
                 * the kernel RBS.
                 */
                rnat_addr = ia64_rse_rnat_addr(laddr);
                ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

                if (laddr == rnat_addr) {
                        /* return NaT collection word itself */
                        *val = ret;
                        return 0;
                }

                if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
                        /*
                         * It is implementation dependent whether the
                         * data portion of a NaT value gets saved on a
                         * st8.spill or RSE spill (e.g., see EAS 2.6,
                         * 4.4.4.6 Register Spill and Fill).  To get
                         * consistent behavior across all possible
                         * IA-64 implementations, we return zero in
                         * this case.
                         */
                        *val = 0;
                        return 0;
                }

                if (laddr < urbs_end) {
                        /*
                         * The desired word is on the kernel RBS and
                         * is not a NaT.
                         */
                        regnum = ia64_rse_num_regs(bspstore, laddr);
                        *val = *ia64_rse_skip_regs(krbs, regnum);
                        return 0;
                }
        }
        copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
        if (copied != sizeof(ret))
                return -EIO;
        *val = ret;
        return 0;
}

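/*
 * Write VAL to the user-level backing store of task CHILD at ADDR:
 * the counterpart of ia64_peek().  Words that still live on the
 * kernel backing store are patched there (RNaT collection slots via
 * put_rnat()); anything else goes through access_process_vm().
 */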
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
           unsigned long user_rbs_end, unsigned long addr, long val)
{
        unsigned long *bspstore, *krbs, regnum, *laddr;
        unsigned long *urbs_end = (long *) user_rbs_end;
        struct pt_regs *child_regs;

        laddr = (unsigned long *) addr;
        child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
                          (unsigned long) urbs_end))
        {
                /*
                 * Attempt to write the RBS in an area that's actually
                 * on the kernel RBS => write the corresponding bits
                 * in the kernel RBS.
                 */
                if (ia64_rse_is_rnat_slot(laddr))
                        put_rnat(child, child_stack, krbs, laddr, val,
                                 urbs_end);
                else {
                        if (laddr < urbs_end) {
                                regnum = ia64_rse_num_regs(bspstore, laddr);
                                *ia64_rse_skip_regs(krbs, regnum) = val;
                        }
                }
        } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
                   != sizeof(val))
                return -EIO;
        return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
                       unsigned long *cfmp)
{
        unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
        long ndirty;

        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        bspstore = (unsigned long *) pt->ar_bspstore;
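        /*
         * pt->loadrs holds the ar.rsc.loadrs field in place (bits
         * 16..29), so loadrs >> 16 is the size of the dirty partition
         * in bytes and loadrs >> 19 its size in 8-byte slots.
         */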
        ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

        if (in_syscall(pt))
                ndirty += (cfm & 0x7f);
        else
                cfm &= ~(1UL << 63);    /* clear valid bit */

        if (cfmp)
                *cfmp = cfm;
        return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
                    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
        unsigned long addr, val;
        long ret;

        /* now copy word for word from kernel rbs to user rbs: */
        for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
                ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
                if (ret < 0)
                        return ret;
                if (access_process_vm(child, addr, &val, sizeof(val), 1)
                    != sizeof(val))
                        return -EIO;
        }
        return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
                unsigned long user_rbs_start, unsigned long user_rbs_end)
{
        unsigned long addr, val;
        long ret;

        /* now copy word for word from user rbs to kernel rbs: */
        for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
                if (access_process_vm(child, addr, &val, sizeof(val), 0)
                                != sizeof(val))
                        return -EIO;

                ret = ia64_poke(child, sw, user_rbs_end, addr, val);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
                            unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
        struct pt_regs *pt;
        unsigned long urbs_end;
        syncfunc_t fn = arg;

        if (unw_unwind_to_user(info) < 0)
                return;
        pt = task_pt_regs(info->task);
        urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

        fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger may change the
 * thread's user stack by writing its memory directly, so we must
 * prevent the RSE state held in the kernel from overwriting the user
 * stack (in that case user space's RSE state is newer than the
 * kernel's).  To work around this, we copy the kernel RSE state to
 * the user RBS before the task stops, so the user RBS holds the
 * up-to-date data.  After the task resumes from the traced stop, we
 * copy the user RBS back to the kernel, so the kernel returns to
 * user mode with the newer state.  TIF_RESTORE_RSE is the flag
 * indicating that this user-to-kernel synchronization is pending.
 */
void ia64_ptrace_stop(void)
{
        if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
                return;
        set_notify_resume(current);
        unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
        clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

        unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
        int stopped = 0;
        struct unw_frame_info info;

        /*
         * If the child is in TASK_STOPPED, we need to change that to
         * TASK_TRACED momentarily while we operate on it.  This ensures
         * that the child won't be woken up and return to user mode while
         * we are doing the sync.  (It can only be woken up for SIGKILL.)
         */

        read_lock(&tasklist_lock);
        if (child->signal) {
                spin_lock_irq(&child->sighand->siglock);
                if (child->state == TASK_STOPPED &&
                    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
                        set_notify_resume(child);

                        child->state = TASK_TRACED;
                        stopped = 1;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!stopped)
                return;

        unw_init_from_blocked_task(&info, child);
        do_sync_rbs(&info, ia64_sync_user_rbs);

        /*
         * Now move the child back into TASK_STOPPED if it should be in a
         * job control stop, so that SIGCONT can be used to wake it up.
         */
        read_lock(&tasklist_lock);
        if (child->signal) {
                spin_lock_irq(&child->sighand->siglock);
                if (child->state == TASK_TRACED &&
                    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
                        child->state = TASK_STOPPED;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
        unsigned long thread_rbs_end;
        struct pt_regs *thread_regs;

        if (ptrace_check_attach(thread, 0) < 0)
                /*
                 * If the thread is not in an attachable state, we'll
                 * ignore it.  The net effect is that if ADDR happens
                 * to overlap with the portion of the thread's
                 * register backing store that is currently residing
                 * on the thread's kernel stack, then ptrace() may end
                 * up accessing a stale value.  But if the thread
                 * isn't stopped, that's a problem anyhow, so we're
                 * doing as well as we can...
                 */
                return 0;

        thread_regs = task_pt_regs(thread);
        thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
        if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
                return 0;

        return 1;       /* looks like we've got a winner */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

        /*
         * Prevent migrating this task while
         * we're fiddling with the FPU state
         */
        preempt_disable();
        if (ia64_is_local_fpu_owner(task) && psr->mfh) {
                psr->mfh = 0;
                task->thread.flags |= IA64_THREAD_FPH_VALID;
                ia64_save_fpu(&task->thread.fph[0]);
        }
        preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

        ia64_flush_fph(task);
        if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
                task->thread.flags |= IA64_THREAD_FPH_VALID;
                memset(&task->thread.fph, 0, sizeof(task->thread.fph));
        }
        ia64_drop_fpu(task);
        psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
                        unsigned long cfm)
{
        struct unw_frame_info info, prev_info;
        unsigned long ip, sp, pr;

        unw_init_from_blocked_task(&info, child);
        while (1) {
                prev_info = info;
                if (unw_unwind(&info) < 0)
                        return;

                unw_get_sp(&info, &sp);
                if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
                    < IA64_PT_REGS_SIZE) {
                        dprintk("ptrace.%s: ran off the top of the kernel "
                                "stack\n", __func__);
                        return;
                }
                if (unw_get_pr (&prev_info, &pr) < 0) {
                        unw_get_rp(&prev_info, &ip);
                        dprintk("ptrace.%s: failed to read "
                                "predicate register (ip=0x%lx)\n",
                                __func__, ip);
                        return;
                }
                if (unw_is_intr_frame(&info)
                    && (pr & (1UL << PRED_USER_STACK)))
                        break;
        }

        /*
         * Note: at the time of this call, the target task is blocked
         * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
         * (aka, "pLvSys") we redirect execution from
         * .work_pending_syscall_end to .work_processed_kernel.
         */
        unw_get_pr(&prev_info, &pr);
        pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
        pr |=  (1UL << PRED_NON_SYSCALL);
        unw_set_pr(&prev_info, pr);

        pt->cr_ifs = (1UL << 63) | cfm;
        /*
         * Clear the memory that is NOT written on syscall-entry to
         * ensure we do not leak kernel-state to user when execution
         * resumes.
         */
        pt->r2 = 0;
        pt->r3 = 0;
        pt->r14 = 0;
        memset(&pt->r16, 0, 16*8);      /* clear r16-r31 */
        memset(&pt->f6, 0, 6*16);       /* clear f6-f11 */
        pt->b7 = 0;
        pt->ar_ccv = 0;
        pt->ar_csd = 0;
        pt->ar_ssd = 0;
}

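/*
 * Read or write the NaT bits of r1-r31 as a single 64-bit bitset.
 * The scratch registers live in pt_regs, with their NaT bits encoded
 * in ar.unat by ia64_put_scratch_nat_bits() (and recovered by
 * ia64_get_scratch_nat_bits()); the preserved registers r4-r7 are
 * accessed through the unwinder.
 */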
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
                 struct unw_frame_info *info,
                 unsigned long *data, int write_access)
{
        unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
        char nat = 0;

        if (write_access) {
                nat_bits = *data;
                scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
                if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
                        dprintk("ptrace: failed to set ar.unat\n");
                        return -1;
                }
                for (regnum = 4; regnum <= 7; ++regnum) {
                        unw_get_gr(info, regnum, &dummy, &nat);
                        unw_set_gr(info, regnum, dummy,
                                   (nat_bits >> regnum) & 1);
                }
        } else {
                if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
                        dprintk("ptrace: failed to read ar.unat\n");
                        return -1;
                }
                nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
                for (regnum = 4; regnum <= 7; ++regnum) {
                        unw_get_gr(info, regnum, &dummy, &nat);
                        nat_bits |= (nat != 0) << regnum;
                }
                *data = nat_bits;
        }
        return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
              unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
        unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
        struct unw_frame_info info;
        struct ia64_fpreg fpval;
        struct switch_stack *sw;
        struct pt_regs *pt;
        long ret, retval = 0;
        char nat = 0;
        int i;

        if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;

        pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
                return -EIO;
        }

        if (((unsigned long) ppr & 0x7) != 0) {
                dprintk("ptrace: unaligned register address %p\n", ppr);
                return -EIO;
        }

        if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
            || access_uarea(child, PT_AR_EC, &ec, 0) < 0
            || access_uarea(child, PT_AR_LC, &lc, 0) < 0
            || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
            || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
            || access_uarea(child, PT_CFM, &cfm, 0)
            || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
                return -EIO;

        /* control regs */

        retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
        retval |= __put_user(psr, &ppr->cr_ipsr);

        /* app regs */

        retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
        retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
        retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
        retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
        retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
        retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

        retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
        retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
        retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
        retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
        retval |= __put_user(cfm, &ppr->cfm);

        /* gr1-gr3 */

        retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
        retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

        /* gr4-gr7 */

        for (i = 4; i < 8; i++) {
                if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
                        return -EIO;
                retval |= __put_user(val, &ppr->gr[i]);
        }

        /* gr8-gr11 */

        retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

        /* gr12-gr15 */

        retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
        retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
        retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

        /* gr16-gr31 */

        retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

        /* b0 */

        retval |= __put_user(pt->b0, &ppr->br[0]);

        /* b1-b5 */

        for (i = 1; i < 6; i++) {
                if (unw_access_br(&info, i, &val, 0) < 0)
                        return -EIO;
                __put_user(val, &ppr->br[i]);
        }

        /* b6-b7 */

        retval |= __put_user(pt->b6, &ppr->br[6]);
        retval |= __put_user(pt->b7, &ppr->br[7]);

        /* fr2-fr5 */

        for (i = 2; i < 6; i++) {
                if (unw_get_fr(&info, i, &fpval) < 0)
                        return -EIO;
                retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
        }

        /* fr6-fr11 */

        retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
                                 sizeof(struct ia64_fpreg) * 6);

        /* fp scratch regs(12-15) */

        retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
                                 sizeof(struct ia64_fpreg) * 4);

        /* fr16-fr31 */

        for (i = 16; i < 32; i++) {
                if (unw_get_fr(&info, i, &fpval) < 0)
                        return -EIO;
                retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
        }

        /* fph */

        ia64_flush_fph(child);
        retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
                                 sizeof(ppr->fr[32]) * 96);

        /* preds */

        retval |= __put_user(pt->pr, &ppr->pr);

        /* nat bits */

        retval |= __put_user(nat_bits, &ppr->nat);

        ret = retval ? -EIO : 0;
        return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
        unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
        struct unw_frame_info info;
        struct switch_stack *sw;
        struct ia64_fpreg fpval;
        struct pt_regs *pt;
        long ret, retval = 0;
        int i;

        memset(&fpval, 0, sizeof(fpval));

        if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;

        pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
                return -EIO;
        }

        if (((unsigned long) ppr & 0x7) != 0) {
                dprintk("ptrace: unaligned register address %p\n", ppr);
                return -EIO;
        }

        /* control regs */

        retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
        retval |= __get_user(psr, &ppr->cr_ipsr);

        /* app regs */

        retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
        retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
        retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
        retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
        retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
        retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

        retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
        retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
        retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
        retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
        retval |= __get_user(cfm, &ppr->cfm);

        /* gr1-gr3 */

        retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
        retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

        /* gr4-gr7 */

        for (i = 4; i < 8; i++) {
                retval |= __get_user(val, &ppr->gr[i]);
                /* NaT bit will be set via PT_NAT_BITS: */
                if (unw_set_gr(&info, i, val, 0) < 0)
                        return -EIO;
        }

        /* gr8-gr11 */

        retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

        /* gr12-gr15 */

        retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
        retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
        retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

        /* gr16-gr31 */

        retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

        /* b0 */

        retval |= __get_user(pt->b0, &ppr->br[0]);

        /* b1-b5 */

        for (i = 1; i < 6; i++) {
                retval |= __get_user(val, &ppr->br[i]);
                unw_set_br(&info, i, val);
        }

        /* b6-b7 */

        retval |= __get_user(pt->b6, &ppr->br[6]);
        retval |= __get_user(pt->b7, &ppr->br[7]);

        /* fr2-fr5 */

        for (i = 2; i < 6; i++) {
                retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
                if (unw_set_fr(&info, i, fpval) < 0)
                        return -EIO;
        }

        /* fr6-fr11 */

        retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
                                   sizeof(ppr->fr[6]) * 6);

        /* fp scratch regs(12-15) */

        retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
                                   sizeof(ppr->fr[12]) * 4);

        /* fr16-fr31 */

        for (i = 16; i < 32; i++) {
                retval |= __copy_from_user(&fpval, &ppr->fr[i],
                                           sizeof(fpval));
                if (unw_set_fr(&info, i, fpval) < 0)
                        return -EIO;
        }

        /* fph */

        ia64_sync_fph(child);
        retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
                                   sizeof(ppr->fr[32]) * 96);

        /* preds */

        retval |= __get_user(pt->pr, &ppr->pr);

        /* nat bits */

        retval |= __get_user(nat_bits, &ppr->nat);

        retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
        retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
        retval |= access_uarea(child, PT_AR_EC, &ec, 1);
        retval |= access_uarea(child, PT_AR_LC, &lc, 1);
        retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
        retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
        retval |= access_uarea(child, PT_CFM, &cfm, 1);
        retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

        ret = retval ? -EIO : 0;
        return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
        struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

        set_tsk_thread_flag(child, TIF_SINGLESTEP);
        child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
        struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

        set_tsk_thread_flag(child, TIF_SINGLESTEP);
        child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
        struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

        /* make sure the single step/taken-branch trap bits are not set: */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
        child_psr->ss = 0;
        child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
        user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                /* read word at location addr */
                if (access_process_vm(child, addr, &data, sizeof(data), 0)
                    != sizeof(data))
                        return -EIO;
                /* ensure return value is not mistaken for error code */
                force_successful_syscall_return();
                return data;

        /* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
         * by the generic ptrace_request().
         */

        case PTRACE_PEEKUSR:
                /* read the word at addr in the USER area */
                if (access_uarea(child, addr, &data, 0) < 0)
                        return -EIO;
                /* ensure return value is not mistaken for error code */
                force_successful_syscall_return();
                return data;

        case PTRACE_POKEUSR:
                /* write the word at addr in the USER area */
                if (access_uarea(child, addr, &data, 1) < 0)
                        return -EIO;
                return 0;

        case PTRACE_OLD_GETSIGINFO:
                /* for backwards-compatibility */
                return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

        case PTRACE_OLD_SETSIGINFO:
                /* for backwards-compatibility */
                return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

        case PTRACE_GETREGS:
                return ptrace_getregs(child,
                                      (struct pt_all_user_regs __user *) data);

        case PTRACE_SETREGS:
                return ptrace_setregs(child,
                                      (struct pt_all_user_regs __user *) data);

        default:
                return ptrace_request(child, request, addr, data);
        }
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
                     long arg4, long arg5, long arg6, long arg7,
                     struct pt_regs regs)
{
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                if (tracehook_report_syscall_entry(&regs))
                        return -ENOSYS;

        /* copy user rbs to kernel rbs */
        if (test_thread_flag(TIF_RESTORE_RSE))
                ia64_sync_krbs();

        if (unlikely(current->audit_context)) {
                long syscall;
                int arch;

                if (IS_IA32_PROCESS(&regs)) {
                        syscall = regs.r1;
                        arch = AUDIT_ARCH_I386;
                } else {
                        syscall = regs.r15;
                        arch = AUDIT_ARCH_IA64;
                }

                audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
        }

        return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
                     long arg4, long arg5, long arg6, long arg7,
                     struct pt_regs regs)
{
        int step;

        if (unlikely(current->audit_context)) {
                int success = AUDITSC_RESULT(regs.r10);
                long result = regs.r8;

                if (success != AUDITSC_SUCCESS)
                        result = -result;
                audit_syscall_exit(success, result);
        }

        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(&regs, step);

        /* copy user rbs to kernel rbs */
        if (test_thread_flag(TIF_RESTORE_RSE))
                ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
        void *kbuf;
        void __user *ubuf;
};

struct regset_set {
        const void *kbuf;
        const void __user *ubuf;
};

struct regset_getset {
        struct task_struct *target;
        const struct user_regset *regset;
        union {
                struct regset_get get;
                struct regset_set set;
        } u;
        unsigned int pos;
        unsigned int count;
        int ret;
};

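/*
 * Access a single general register for the ELF gregset.  ADDR is the
 * ELF_GR_OFFSET() byte offset of the register, not a register number;
 * r4-r7 are preserved registers and must go through the unwinder.
 */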
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
                unsigned long addr, unsigned long *data, int write_access)
{
        struct pt_regs *pt;
        unsigned long *ptr = NULL;
        int ret;
        char nat = 0;

        pt = task_pt_regs(target);
        switch (addr) {
        case ELF_GR_OFFSET(1):
                ptr = &pt->r1;
                break;
        case ELF_GR_OFFSET(2):
        case ELF_GR_OFFSET(3):
                ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
                break;
        case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
                if (write_access) {
                        /* read NaT bit first: */
                        unsigned long dummy;

                        ret = unw_get_gr(info, addr/8, &dummy, &nat);
                        if (ret < 0)
                                return ret;
                }
                return unw_access_gr(info, addr/8, data, &nat, write_access);
        case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
                ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
                break;
        case ELF_GR_OFFSET(12):
        case ELF_GR_OFFSET(13):
                ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
                break;
        case ELF_GR_OFFSET(14):
                ptr = &pt->r14;
                break;
        case ELF_GR_OFFSET(15):
                ptr = &pt->r15;
        }
        if (write_access)
                *ptr = *data;
        else
                *data = *ptr;
        return 0;
}

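/*
 * Access a single branch register for the ELF regset.  b0, b6 and b7
 * live in pt_regs; b1-b5 are preserved registers and are accessed
 * through the unwinder.
 */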
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
                unsigned long addr, unsigned long *data, int write_access)
{
        struct pt_regs *pt;
        unsigned long *ptr = NULL;

        pt = task_pt_regs(target);
        switch (addr) {
        case ELF_BR_OFFSET(0):
                ptr = &pt->b0;
                break;
        case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
                return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
                                     data, write_access);
        case ELF_BR_OFFSET(6):
                ptr = &pt->b6;
                break;
        case ELF_BR_OFFSET(7):
                ptr = &pt->b7;
        }
        if (write_access)
                *ptr = *data;
        else
                *data = *ptr;
        return 0;
}

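/*
 * Access an application or control register for the ELF regset.
 * Most of these live in pt_regs; ar.lc and ar.ec are preserved
 * registers and go through the unwinder, while ar.bsp, cfm and ipsr
 * need the special handling explained in the comments below.
 */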
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
                unsigned long addr, unsigned long *data, int write_access)
{
        struct pt_regs *pt;
        unsigned long cfm, urbs_end;
        unsigned long *ptr = NULL;

        pt = task_pt_regs(target);
        if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
                switch (addr) {
                case ELF_AR_RSC_OFFSET:
                        /* force PL3 */
                        if (write_access)
                                pt->ar_rsc = *data | (3 << 2);
                        else
                                *data = pt->ar_rsc;
                        return 0;
                case ELF_AR_BSP_OFFSET:
                        /*
                         * By convention, we use PT_AR_BSP to refer to
                         * the end of the user-level backing store.
                         * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
                         * to get the real value of ar.bsp at the time
                         * the kernel was entered.
                         *
                         * Furthermore, when changing the contents of
                         * PT_AR_BSP (or PT_CFM) while the task is
                         * blocked in a system call, convert the state
                         * so that the non-system-call exit
                         * path is used.  This ensures that the proper
                         * state will be picked up when resuming
                         * execution.  However, it *also* means that
                         * once we write PT_AR_BSP/PT_CFM, it won't be
                         * possible to modify the syscall arguments of
                         * the pending system call any longer.  This
                         * shouldn't be an issue because modifying
                         * PT_AR_BSP/PT_CFM generally implies that
                         * we're either abandoning the pending system
                         * call or that we defer its re-execution
                         * (e.g., due to GDB doing an inferior
                         * function call).
                         */
                        urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
                        if (write_access) {
                                if (*data != urbs_end) {
                                        if (in_syscall(pt))
                                                convert_to_non_syscall(target,
                                                                       pt,
                                                                       cfm);
                                        /*
                                         * Simulate user-level write
                                         * of ar.bsp:
                                         */
                                        pt->loadrs = 0;
                                        pt->ar_bspstore = *data;
                                }
                        } else
                                *data = urbs_end;
                        return 0;
                case ELF_AR_BSPSTORE_OFFSET:
                        ptr = &pt->ar_bspstore;
                        break;
                case ELF_AR_RNAT_OFFSET:
                        ptr = &pt->ar_rnat;
                        break;
                case ELF_AR_CCV_OFFSET:
                        ptr = &pt->ar_ccv;
                        break;
                case ELF_AR_UNAT_OFFSET:
                        ptr = &pt->ar_unat;
                        break;
                case ELF_AR_FPSR_OFFSET:
                        ptr = &pt->ar_fpsr;
                        break;
                case ELF_AR_PFS_OFFSET:
                        ptr = &pt->ar_pfs;
                        break;
                case ELF_AR_LC_OFFSET:
                        return unw_access_ar(info, UNW_AR_LC, data,
                                             write_access);
                case ELF_AR_EC_OFFSET:
                        return unw_access_ar(info, UNW_AR_EC, data,
                                             write_access);
                case ELF_AR_CSD_OFFSET:
                        ptr = &pt->ar_csd;
                        break;
                case ELF_AR_SSD_OFFSET:
                        ptr = &pt->ar_ssd;
                }
        } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
                switch (addr) {
                case ELF_CR_IIP_OFFSET:
                        ptr = &pt->cr_iip;
                        break;
                case ELF_CFM_OFFSET:
                        urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
                        if (write_access) {
                                if (((cfm ^ *data) & PFM_MASK) != 0) {
                                        if (in_syscall(pt))
                                                convert_to_non_syscall(target,
                                                                       pt,
                                                                       cfm);
                                        pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
                                                      | (*data & PFM_MASK));
                                }
                        } else
                                *data = cfm;
                        return 0;
                case ELF_CR_IPSR_OFFSET:
                        if (write_access) {
                                unsigned long tmp = *data;
                                /* psr.ri==3 is a reserved value: SDM 2:25 */
                                if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
                                        tmp &= ~IA64_PSR_RI;
                                pt->cr_ipsr = ((tmp & IPSR_MASK)
                                               | (pt->cr_ipsr & ~IPSR_MASK));
                        } else
                                *data = (pt->cr_ipsr & IPSR_MASK);
                        return 0;
1514                 }
1515         } else if (addr == ELF_NAT_OFFSET)
1516                 return access_nat_bits(target, pt, info,
1517                                        data, write_access);
1518         else if (addr == ELF_PR_OFFSET)
1519                 ptr = &pt->pr;
1520         else
1521                 return -1;
1522
1523         if (write_access)
1524                 *ptr = *data;
1525         else
1526                 *data = *ptr;
1527
1528         return 0;
1529 }
1530
1531 static int
1532 access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1533                 unsigned long addr, unsigned long *data, int write_access)
1534 {
1535         if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1536                 return access_elf_gpreg(target, info, addr, data, write_access);
1537         else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1538                 return access_elf_breg(target, info, addr, data, write_access);
1539         else
1540                 return access_elf_areg(target, info, addr, data, write_access);
1541 }
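
/*
 * A minimal usage sketch for the dispatcher above (hypothetical caller,
 * not part of this file): reading b0 of a stopped task through the
 * unified ELF-offset interface would look like
 *
 *	unsigned long val;
 *
 *	if (access_elf_reg(target, info, ELF_BR_OFFSET(0), &val, 0) < 0)
 *		return -EIO;
 *
 * The regset get/set loops below drive it exactly this way, one
 * sizeof(elf_greg_t) slot at a time.
 */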
1542
1543 void do_gpregs_get(struct unw_frame_info *info, void *arg)
1544 {
1545         struct pt_regs *pt;
1546         struct regset_getset *dst = arg;
1547         elf_greg_t tmp[16];
1548         unsigned int i, index, min_copy;
1549
1550         if (unw_unwind_to_user(info) < 0)
1551                 return;
1552
1553         /*
1554          * coredump format:
1555          *      r0-r31
1556          *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1557          *      predicate registers (p0-p63)
1558          *      b0-b7
1559          *      ip cfm user-mask
1560          *      ar.rsc ar.bsp ar.bspstore ar.rnat
1561          *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1562          */
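        /*
         * In byte offsets (one 8-byte elf_greg_t per slot), that places
         * the NaT word at ELF_NAT_OFFSET, pr right after it, then b0-b7,
         * with the ip/cfm/psr and ar.* block running from
         * ELF_CR_IIP_OFFSET up to ELF_AR_END_OFFSET; the copy stanzas
         * below walk those ranges in order.
         */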
1563
1564
1565         /* Skip r0: it always reads as zero, so just zero-fill its slot */
1566         if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1567                 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1568                                                       &dst->u.get.kbuf,
1569                                                       &dst->u.get.ubuf,
1570                                                       0, ELF_GR_OFFSET(1));
1571                 if (dst->ret || dst->count == 0)
1572                         return;
1573         }
1574
1575         /* gr1-gr15 */
1576         if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1577                 index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1578                 min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1579                          (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1580                 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1581                                 index++)
1582                         if (access_elf_reg(dst->target, info, i,
1583                                                 &tmp[index], 0) < 0) {
1584                                 dst->ret = -EIO;
1585                                 return;
1586                         }
1587                 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1588                                 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1589                                 ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1590                 if (dst->ret || dst->count == 0)
1591                         return;
1592         }
1593
1594         /* gr16-gr31 */
1595         if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1596                 pt = task_pt_regs(dst->target);
1597                 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1598                                 &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1599                                 ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1600                 if (dst->ret || dst->count == 0)
1601                         return;
1602         }
1603
1604         /* nat, pr, b0 - b7 */
1605         if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1606                 index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1607                 min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1608                          (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1609                 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1610                                 index++)
1611                         if (access_elf_reg(dst->target, info, i,
1612                                                 &tmp[index], 0) < 0) {
1613                                 dst->ret = -EIO;
1614                                 return;
1615                         }
1616                 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1617                                 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1618                                 ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1619                 if (dst->ret || dst->count == 0)
1620                         return;
1621         }
1622
1623         /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1624          * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1625          */
1626         if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1627                 index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1628                 min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1629                          (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1630                 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1631                                 index++)
1632                         if (access_elf_reg(dst->target, info, i,
1633                                                 &tmp[index], 0) < 0) {
1634                                 dst->ret = -EIO;
1635                                 return;
1636                         }
1637                 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1638                                 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1639                                 ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1640         }
1641 }
1642
1643 void do_gpregs_set(struct unw_frame_info *info, void *arg)
1644 {
1645         struct pt_regs *pt;
1646         struct regset_getset *dst = arg;
1647         elf_greg_t tmp[16];
1648         unsigned int i, index;
1649
1650         if (unw_unwind_to_user(info) < 0)
1651                 return;
1652
1653         /* Skip r0: writes to the hardwired zero register are ignored */
1654         if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1655                 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1656                                                        &dst->u.set.kbuf,
1657                                                        &dst->u.set.ubuf,
1658                                                        0, ELF_GR_OFFSET(1));
1659                 if (dst->ret || dst->count == 0)
1660                         return;
1661         }
1662
1663         /* gr1-gr15 */
1664         if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1665                 i = dst->pos;
1666                 index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1667                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1668                                 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1669                                 ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1670                 if (dst->ret)
1671                         return;
1672                 for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1673                         if (access_elf_reg(dst->target, info, i,
1674                                                 &tmp[index], 1) < 0) {
1675                                 dst->ret = -EIO;
1676                                 return;
1677                         }
1678                 if (dst->count == 0)
1679                         return;
1680         }
1681
1682         /* gr16-gr31 */
1683         if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1684                 pt = task_pt_regs(dst->target);
1685                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1686                                 &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1687                                 ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1688                 if (dst->ret || dst->count == 0)
1689                         return;
1690         }
1691
1692         /* nat, pr, b0 - b7 */
1693         if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1694                 i = dst->pos;
1695                 index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1696                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1697                                 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1698                                 ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1699                 if (dst->ret)
1700                         return;
1701                 for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1702                         if (access_elf_reg(dst->target, info, i,
1703                                                 &tmp[index], 1) < 0) {
1704                                 dst->ret = -EIO;
1705                                 return;
1706                         }
1707                 if (dst->count == 0)
1708                         return;
1709         }
1710
1711         /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1712          * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1713          */
1714         if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1715                 i = dst->pos;
1716                 index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1717                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1718                                 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1719                                 ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1720                 if (dst->ret)
1721                         return;
1722                 for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1723                         if (access_elf_reg(dst->target, info, i,
1724                                                 &tmp[index], 1) < 0) {
1725                                 dst->ret = -EIO;
1726                                 return;
1727                         }
1728         }
1729 }
1730
1731 #define ELF_FP_OFFSET(i)        ((i) * sizeof(elf_fpreg_t))
1732
1733 void do_fpregs_get(struct unw_frame_info *info, void *arg)
1734 {
1735         struct regset_getset *dst = arg;
1736         struct task_struct *task = dst->target;
1737         elf_fpreg_t tmp[30];
1738         int index, min_copy, i;
1739
1740         if (unw_unwind_to_user(info) < 0)
1741                 return;
1742
1743         /* fr0/fr1 are architecturally fixed (+0.0/+1.0); zero-fill their slots */
1744         if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1745                 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1746                                                       &dst->u.get.kbuf,
1747                                                       &dst->u.get.ubuf,
1748                                                       0, ELF_FP_OFFSET(2));
1749                 if (dst->count == 0 || dst->ret)
1750                         return;
1751         }
1752
1753         /* fr2-fr31 */
1754         if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1755                 index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1756
1757                 min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1758                                 dst->pos + dst->count);
1759                 for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1760                                 index++)
1761                         if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1762                                          &tmp[index])) {
1763                                 dst->ret = -EIO;
1764                                 return;
1765                         }
1766                 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1767                                 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1768                                 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1769                 if (dst->count == 0 || dst->ret)
1770                         return;
1771         }
1772
1773         /* fph */
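        /*
         * fr32-fr127 (the fph partition) are handled lazily:
         * ia64_flush_fph() first forces any live fph state out of the
         * CPU into thread.fph; if the task never touched fph,
         * IA64_THREAD_FPH_VALID stays clear and zeros are reported
         * instead of stale data.
         */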
1774         if (dst->count > 0) {
1775                 ia64_flush_fph(dst->target);
1776                 if (task->thread.flags & IA64_THREAD_FPH_VALID)
1777                         dst->ret = user_regset_copyout(
1778                                 &dst->pos, &dst->count,
1779                                 &dst->u.get.kbuf, &dst->u.get.ubuf,
1780                                 &dst->target->thread.fph,
1781                                 ELF_FP_OFFSET(32), -1);
1782                 else
1783                         /* Zero fill instead.  */
1784                         dst->ret = user_regset_copyout_zero(
1785                                 &dst->pos, &dst->count,
1786                                 &dst->u.get.kbuf, &dst->u.get.ubuf,
1787                                 ELF_FP_OFFSET(32), -1);
1788         }
1789 }
1790
1791 void do_fpregs_set(struct unw_frame_info *info, void *arg)
1792 {
1793         struct regset_getset *dst = arg;
1794         elf_fpreg_t fpreg, tmp[30];
1795         int index, start, end;
1796
1797         if (unw_unwind_to_user(info) < 0)
1798                 return;
1799
1800         /* fr0/fr1 are architecturally fixed; ignore any user-supplied values */
1801         if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1802                 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1803                                                        &dst->u.set.kbuf,
1804                                                        &dst->u.set.ubuf,
1805                                                        0, ELF_FP_OFFSET(2));
1806                 if (dst->count == 0 || dst->ret)
1807                         return;
1808         }
1809
1810         /* fr2-fr31 */
1811         if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1812                 start = dst->pos;
1813                 end = min(((unsigned int)ELF_FP_OFFSET(32)),
1814                          dst->pos + dst->count);
1815                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1816                                 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1817                                 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1818                 if (dst->ret)
1819                         return;
1820
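                /*
                 * An elf_fpreg_t is 16 bytes, but the copy-in above may
                 * begin or end half-way through a register.  Splice the
                 * missing half back in from the current unwind value so
                 * the full-register unw_set_fr() loop below does not
                 * clobber the untouched part.
                 */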
1821                 if (start & 0xF) { /* only write high part */
1822                         if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1823                                          &fpreg)) {
1824                                 dst->ret = -EIO;
1825                                 return;
1826                         }
1827                         tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1828                                 = fpreg.u.bits[0];
1829                         start &= ~0xFUL;
1830                 }
1831                 if (end & 0xF) { /* only write low part */
1832                         if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1833                                         &fpreg)) {
1834                                 dst->ret = -EIO;
1835                                 return;
1836                         }
1837                         tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1838                                 = fpreg.u.bits[1];
1839                         end = (end + 0xF) & ~0xFUL;
1840                 }
1841
1842                 for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
1843                         index = start / sizeof(elf_fpreg_t);
1844                         if (unw_set_fr(info, index, tmp[index - 2])) {
1845                                 dst->ret = -EIO;
1846                                 return;
1847                         }
1848                 }
1849                 if (dst->ret || dst->count == 0)
1850                         return;
1851         }
1852
1853         /* fph */
1854         if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1855                 ia64_sync_fph(dst->target);
1856                 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1857                                                 &dst->u.set.kbuf,
1858                                                 &dst->u.set.ubuf,
1859                                                 &dst->target->thread.fph,
1860                                                 ELF_FP_OFFSET(32), -1);
1861         }
1862 }
1863
1864 static int
1865 do_regset_call(void (*call)(struct unw_frame_info *, void *),
1866                struct task_struct *target,
1867                const struct user_regset *regset,
1868                unsigned int pos, unsigned int count,
1869                const void *kbuf, const void __user *ubuf)
1870 {
1871         struct regset_getset info = { .target = target, .regset = regset,
1872                                  .pos = pos, .count = count,
1873                                  .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1874                                  .ret = 0 };
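        /*
         * Note: .u.set and .u.get are members of the same union and
         * have an identical layout (modulo const), so initializing
         * .u.set here serves the get paths as well.
         */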
1875
1876         if (target == current)
1877                 unw_init_running(call, &info);
1878         else {
1879                 struct unw_frame_info ufi;
1880                 memset(&ufi, 0, sizeof(ufi));
1881                 unw_init_from_blocked_task(&ufi, target);
1882                 (*call)(&ufi, &info);
1883         }
1884
1885         return info.ret;
1886 }
1887
1888 static int
1889 gpregs_get(struct task_struct *target,
1890            const struct user_regset *regset,
1891            unsigned int pos, unsigned int count,
1892            void *kbuf, void __user *ubuf)
1893 {
1894         return do_regset_call(do_gpregs_get, target, regset, pos, count,
1895                 kbuf, ubuf);
1896 }
1897
1898 static int gpregs_set(struct task_struct *target,
1899                 const struct user_regset *regset,
1900                 unsigned int pos, unsigned int count,
1901                 const void *kbuf, const void __user *ubuf)
1902 {
1903         return do_regset_call(do_gpregs_set, target, regset, pos, count,
1904                 kbuf, ubuf);
1905 }
1906
1907 static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1908 {
1909         do_sync_rbs(info, ia64_sync_user_rbs);
1910 }
1911
1912 /*
1913  * This is called to write back the register backing store.
1914  * ptrace does this before the thread stops, so that a tracer reading the user
1915  * memory after the thread stops will get the current register data.
1916  */
1917 static int
1918 gpregs_writeback(struct task_struct *target,
1919                  const struct user_regset *regset,
1920                  int now)
1921 {
1922         if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1923                 return 0;
1924         set_notify_resume(target);
1925         return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1926                 NULL, NULL);
1927 }
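
/*
 * Design note: the test_and_set of TIF_RESTORE_RSE above makes repeated
 * writeback requests idempotent; the same flag is presumed to tell the
 * resume path to reconcile the kernel and user backing stores before
 * the thread runs again.
 */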
1928
1929 static int
1930 fpregs_active(struct task_struct *target, const struct user_regset *regset)
1931 {
1932         return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1933 }
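
/*
 * The optional ->active hook reports how many registers are currently
 * worth dumping: all 128 f-registers once the task has valid fph state,
 * otherwise just fr0-fr31.
 */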
1934
1935 static int fpregs_get(struct task_struct *target,
1936                 const struct user_regset *regset,
1937                 unsigned int pos, unsigned int count,
1938                 void *kbuf, void __user *ubuf)
1939 {
1940         return do_regset_call(do_fpregs_get, target, regset, pos, count,
1941                 kbuf, ubuf);
1942 }
1943
1944 static int fpregs_set(struct task_struct *target,
1945                 const struct user_regset *regset,
1946                 unsigned int pos, unsigned int count,
1947                 const void *kbuf, const void __user *ubuf)
1948 {
1949         return do_regset_call(do_fpregs_set, target, regset, pos, count,
1950                 kbuf, ubuf);
1951 }
1952
1953 static int
1954 access_uarea(struct task_struct *child, unsigned long addr,
1955               unsigned long *data, int write_access)
1956 {
1957         unsigned int pos = -1; /* an invalid value */
1958         int ret;
1959         unsigned long *ptr, regnum;
1960
1961         if ((addr & 0x7) != 0) {
1962                 dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1963                 return -1;
1964         }
1965         if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1966                 (addr >= PT_R7 + 8 && addr < PT_B1) ||
1967                 (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1968                 (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1969                 dprintk("ptrace: rejecting access to register "
1970                                         "address 0x%lx\n", addr);
1971                 return -1;
1972         }
1973
1974         switch (addr) {
1975         case PT_F32 ... (PT_F127 + 15):
1976                 pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1977                 break;
1978         case PT_F2 ... (PT_F5 + 15):
1979                 pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1980                 break;
1981         case PT_F10 ... (PT_F31 + 15):
1982                 pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1983                 break;
1984         case PT_F6 ... (PT_F9 + 15):
1985                 pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1986                 break;
1987         }
1988
1989         if (pos != -1) {
1990                 if (write_access)
1991                         ret = fpregs_set(child, NULL, pos,
1992                                 sizeof(unsigned long), data, NULL);
1993                 else
1994                         ret = fpregs_get(child, NULL, pos,
1995                                 sizeof(unsigned long), data, NULL);
1996                 if (ret != 0)
1997                         return -1;
1998                 return 0;
1999         }
2000
2001         switch (addr) {
2002         case PT_NAT_BITS:
2003                 pos = ELF_NAT_OFFSET;
2004                 break;
2005         case PT_R4 ... PT_R7:
2006                 pos = addr - PT_R4 + ELF_GR_OFFSET(4);
2007                 break;
2008         case PT_B1 ... PT_B5:
2009                 pos = addr - PT_B1 + ELF_BR_OFFSET(1);
2010                 break;
2011         case PT_AR_EC:
2012                 pos = ELF_AR_EC_OFFSET;
2013                 break;
2014         case PT_AR_LC:
2015                 pos = ELF_AR_LC_OFFSET;
2016                 break;
2017         case PT_CR_IPSR:
2018                 pos = ELF_CR_IPSR_OFFSET;
2019                 break;
2020         case PT_CR_IIP:
2021                 pos = ELF_CR_IIP_OFFSET;
2022                 break;
2023         case PT_CFM:
2024                 pos = ELF_CFM_OFFSET;
2025                 break;
2026         case PT_AR_UNAT:
2027                 pos = ELF_AR_UNAT_OFFSET;
2028                 break;
2029         case PT_AR_PFS:
2030                 pos = ELF_AR_PFS_OFFSET;
2031                 break;
2032         case PT_AR_RSC:
2033                 pos = ELF_AR_RSC_OFFSET;
2034                 break;
2035         case PT_AR_RNAT:
2036                 pos = ELF_AR_RNAT_OFFSET;
2037                 break;
2038         case PT_AR_BSPSTORE:
2039                 pos = ELF_AR_BSPSTORE_OFFSET;
2040                 break;
2041         case PT_PR:
2042                 pos = ELF_PR_OFFSET;
2043                 break;
2044         case PT_B6:
2045                 pos = ELF_BR_OFFSET(6);
2046                 break;
2047         case PT_AR_BSP:
2048                 pos = ELF_AR_BSP_OFFSET;
2049                 break;
2050         case PT_R1 ... PT_R3:
2051                 pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2052                 break;
2053         case PT_R12 ... PT_R15:
2054                 pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2055                 break;
2056         case PT_R8 ... PT_R11:
2057                 pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2058                 break;
2059         case PT_R16 ... PT_R31:
2060                 pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2061                 break;
2062         case PT_AR_CCV:
2063                 pos = ELF_AR_CCV_OFFSET;
2064                 break;
2065         case PT_AR_FPSR:
2066                 pos = ELF_AR_FPSR_OFFSET;
2067                 break;
2068         case PT_B0:
2069                 pos = ELF_BR_OFFSET(0);
2070                 break;
2071         case PT_B7:
2072                 pos = ELF_BR_OFFSET(7);
2073                 break;
2074         case PT_AR_CSD:
2075                 pos = ELF_AR_CSD_OFFSET;
2076                 break;
2077         case PT_AR_SSD:
2078                 pos = ELF_AR_SSD_OFFSET;
2079                 break;
2080         }
2081
2082         if (pos != -1) {
2083                 if (write_access)
2084                         ret = gpregs_set(child, NULL, pos,
2085                                 sizeof(unsigned long), data, NULL);
2086                 else
2087                         ret = gpregs_get(child, NULL, pos,
2088                                 sizeof(unsigned long), data, NULL);
2089                 if (ret != 0)
2090                         return -1;
2091                 return 0;
2092         }
2093
2094         /* access debug registers */
2095         if (addr >= PT_IBR) {
2096                 regnum = (addr - PT_IBR) >> 3;
2097                 ptr = &child->thread.ibr[0];
2098         } else {
2099                 regnum = (addr - PT_DBR) >> 3;
2100                 ptr = &child->thread.dbr[0];
2101         }
2102
2103         if (regnum >= 8) {
2104                 dprintk("ptrace: rejecting access to register "
2105                                 "address 0x%lx\n", addr);
2106                 return -1;
2107         }
2108 #ifdef CONFIG_PERFMON
2109         /*
2110          * Check if debug registers are used by perfmon. This
2111          * test must be done once we know that we can do the
2112          * operation, i.e. the arguments are all valid, but
2113          * before we start modifying the state.
2114          *
2115          * Perfmon needs to keep a count of how many processes
2116                          * are trying to modify the debug registers for
2117                          * system-wide monitoring sessions.
2118                          *
2119                          * We also include read accesses here, because they
2120                          * may cause the PMU-installed debug register state
2121                          * (dbr[], ibr[]) to be reset.  The two arrays are
2122                          * also used by perfmon, but perfmon's use is not
2123                          * tracked via IA64_THREAD_DBG_VALID; those registers
2124                          * are restored by the PMU context-switch code.
2125          */
2126         if (pfm_use_debug_registers(child))
2127                 return -1;
2128 #endif
2129
2130         if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2131                 child->thread.flags |= IA64_THREAD_DBG_VALID;
2132                 memset(child->thread.dbr, 0,
2133                                 sizeof(child->thread.dbr));
2134                 memset(child->thread.ibr, 0,
2135                                 sizeof(child->thread.ibr));
2136         }
2137
2138         ptr += regnum;
2139
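        /*
         * Odd-numbered ibr/dbr registers hold the breakpoint control
         * word; bits 56-58 are the PL0-PL2 privilege-level-mask bits,
         * so clearing them below confines user-programmed breakpoints
         * to user level (PL3).
         */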
2140         if ((regnum & 1) && write_access) {
2141                 /* don't let the user set kernel-level breakpoints: */
2142                 *ptr = *data & ~(7UL << 56);
2143                 return 0;
2144         }
2145         if (write_access)
2146                 *ptr = *data;
2147         else
2148                 *data = *ptr;
2149         return 0;
2150 }
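
/*
 * Flow sketch: a PTRACE_PEEKUSR/POKEUSR request for, say, PT_B0 arrives
 * here, is translated to pos == ELF_BR_OFFSET(0), and is then serviced
 * one 8-byte register at a time through gpregs_get()/gpregs_set().
 */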
2151
2152 static const struct user_regset native_regsets[] = {
2153         {
2154                 .core_note_type = NT_PRSTATUS,
2155                 .n = ELF_NGREG,
2156                 .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2157                 .get = gpregs_get, .set = gpregs_set,
2158                 .writeback = gpregs_writeback
2159         },
2160         {
2161                 .core_note_type = NT_PRFPREG,
2162                 .n = ELF_NFPREG,
2163                 .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2164                 .get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2165         },
2166 };
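
/*
 * These regsets back the NT_PRSTATUS and NT_PRFPREG notes written into
 * ELF core dumps; later kernels also expose the same views to debuggers
 * through generic regset ptrace requests.
 */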
2167
2168 static const struct user_regset_view user_ia64_view = {
2169         .name = "ia64",
2170         .e_machine = EM_IA_64,
2171         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2172 };
2173
2174 const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2175 {
2176 #ifdef CONFIG_IA32_SUPPORT
2177         extern const struct user_regset_view user_ia32_view;
2178         if (IS_IA32_PROCESS(task_pt_regs(tsk)))
2179                 return &user_ia32_view;
2180 #endif
2181         return &user_ia64_view;
2182 }
2183
2184 struct syscall_get_set_args {
2185         unsigned int i;
2186         unsigned int n;
2187         unsigned long *args;
2188         struct pt_regs *regs;
2189         int rw;
2190 };
2191
2192 static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2193 {
2194         struct syscall_get_set_args *args = data;
2195         struct pt_regs *pt = args->regs;
2196         unsigned long *krbs, cfm, ndirty;
2197         int i, count;
2198
2199         if (unw_unwind_to_user(info) < 0)
2200                 return;
2201
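        /*
         * Syscall arguments live in stacked registers that were spilled
         * to the kernel RBS on entry.  pt->loadrs keeps the dirty-byte
         * count shifted left by 16, so >> 19 converts it to 8-byte
         * slots; ndirty then counts the dirty registers below the
         * current frame, and cfm & 0x7f (sof) bounds how many argument
         * slots actually exist.
         */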
2202         cfm = pt->cr_ifs;
2203         krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2204         ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2205
2206         count = 0;
2207         if (in_syscall(pt))
2208                 count = min_t(int, args->n, cfm & 0x7f);
2209
2210         for (i = 0; i < count; i++) {
2211                 if (args->rw)
2212                         *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2213                                 args->args[i];
2214                 else
2215                         args->args[i] = *ia64_rse_skip_regs(krbs,
2216                                 ndirty + i + args->i);
2217         }
2218
2219         if (!args->rw) {
2220                 while (i < args->n) {
2221                         args->args[i] = 0;
2222                         i++;
2223                 }
2224         }
2225 }
2226
2227 void ia64_syscall_get_set_arguments(struct task_struct *task,
2228         struct pt_regs *regs, unsigned int i, unsigned int n,
2229         unsigned long *args, int rw)
2230 {
2231         struct syscall_get_set_args data = {
2232                 .i = i,
2233                 .n = n,
2234                 .args = args,
2235                 .regs = regs,
2236                 .rw = rw,
2237         };
2238
2239         if (task == current)
2240                 unw_init_running(syscall_get_set_args_cb, &data);
2241         else {
2242                 struct unw_frame_info ufi;
2243                 memset(&ufi, 0, sizeof(ufi));
2244                 unw_init_from_blocked_task(&ufi, task);
2245                 syscall_get_set_args_cb(&ufi, &data);
2246         }
2247 }
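
/*
 * A hedged usage sketch: the syscall_get_arguments() helper in
 * asm/syscall.h is the expected caller, e.g.
 *
 *	unsigned long args[6];
 *
 *	ia64_syscall_get_set_arguments(task, regs, 0, 6, args, 0);
 *
 * reads every possible argument of the pending syscall, zero-filling
 * past cfm.sof when fewer registers are live.
 */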