/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
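/*
 * For example, MASK(3) == 0x7 and MASK(38) == 0x3fffffffff; the CFM
 * (current frame marker) is a 38-bit quantity (sof/sol/sor plus the
 * three rrb fields), which is what PFM_MASK is meant to cover.
 */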

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
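
/*
 * Illustration: bit 63 of the saved cr.ifs is its "valid" bit.  The
 * interruption path records cr.ifs with that bit set (cf. the
 * (1UL << 63) | cfm store in convert_to_non_syscall() below), while
 * the syscall path stores only the frame marker, leaving bit 63
 * clear.  So, e.g., a saved cr_ifs of 0x800000000000008c is an
 * interrupt frame (in_syscall() == 0), whereas 0x000000000000008c
 * is a syscall frame (in_syscall() == 1).
 */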

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
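
/*
 * Worked example (with a hypothetical spill offset): suppose r2 is
 * spilled at an offset in struct pt_regs for which ia64_unat_pos()
 * yields bit 10.  For GET_BITS(2, 3, unat), mask == 0x3 << 2 and
 * dist == 10 - 2 == 8, so rotating unat right by 8 moves the UNaT
 * bits for r2/r3 (bits 10 and 11) down to bits 2 and 3, where the
 * mask extracts them.  The 64 + bit - first branch merely keeps the
 * rotation count positive when the spill slot sits below bit "first".
 */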

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
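
/*
 * Example walk-through: an IA-64 bundle is 16 bytes and holds three
 * instruction slots, so psr.ri normally advances 0 -> 1 -> 2 and then
 * wraps to slot 0 of the next bundle (cr_iip += 16).  The template
 * lives in the low 5 bits of the bundle; (w0 >> 1) & 0xf drops the
 * stop-bit distinction, so the 4-bit value 0x2 matches both MLX
 * template variants.  In an MLX bundle, slot 2 is the second half of
 * a two-slot movl, so incrementing from slot 1 skips straight to the
 * next bundle instead of landing on slot 2.
 */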

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignments of the
 * user and kernel backing stores differ, this is not completely
 * trivial.  In essence, we need to construct the user RNAT based on
 * up to two kernel RNAT values and/or the RNAT value saved in the
 * child's pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 *					| slot62 |
 * +- - - - +				+--------+
 *					|  rnat	 |
 * +- - - - +				+--------+
 *   vrnat				| slot00 |
 * +- - - - +				+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
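
/*
 * Numeric example of the splice above: suppose slot 0 of the user
 * rnat maps to kernel slot number shift == 40.  Then user bits 0..22
 * come from bits 40..62 of rnat0 ((rnat0 & (mask << 40)) >> 40), and
 * user bits 23 and up come from the low bits of the next kernel rnat
 * word ((rnat1 & (mask >> 23)) << 23).  Bit 63 of an rnat word never
 * carries a NaT bit, which is why all of this arithmetic works
 * modulo 63 slots rather than 64.
 */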

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * current frame marker (CFM) that was active at the time the kernel
 * was entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
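
/*
 * Note on the shift above: pt->loadrs mirrors the ar.rsc.loadrs
 * format, in which the number of dirty *bytes* on the kernel RBS
 * (including NaT collection slots) sits in bits 16 and up.  Hence
 * loadrs >> 16 is the dirty byte count and loadrs >> 19 the number
 * of 8-byte slots, from which ia64_rse_num_regs() subtracts the
 * interleaved NaT collection slots.  Example: loadrs == 0x300000
 * means 0x30 bytes, i.e., six slots, i.e., six dirty registers if
 * no rnat slot intervenes.
 */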

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			    unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user-level stack by writing to its memory directly.  We
 * must prevent the RSE state stored in the kernel from overwriting
 * the user-level stack, since in that case user space's RSE state is
 * newer than the kernel's.  To work around the issue, we copy the
 * kernel RSE state to the user RBS before the task stops, so the
 * user RBS holds up-to-date data.  We then copy the user RBS back to
 * the kernel after the task resumes from the traced stop, and the
 * kernel uses the newer state to return to user mode.
 * TIF_RESTORE_RSE is the flag indicating that we need to synchronize
 * the user RSE state back into the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs (12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs (12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
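
/*
 * Illustrative userspace sketch of driving the interface above; it
 * is not part of the kernel and assumes the usual <sys/ptrace.h>
 * wrappers plus the PT_* offsets from <asm/ptrace_offsets.h>:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, 0, 0);    // child: request tracing
 *		execlp("ls", "ls", NULL);
 *	} else {
 *		long ip;
 *		waitpid(pid, NULL, 0);              // wait for the exec stop
 *		errno = 0;
 *		// glibc's PTRACE_PEEKUSER reaches PTRACE_PEEKUSR above,
 *		// which funnels into access_uarea():
 *		ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *		if (ip != -1 || errno == 0)
 *			printf("child iip = 0x%lx\n", ip);
 *		ptrace(PTRACE_CONT, pid, 0, 0);
 *	}
 */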

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};
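
/*
 * A short sketch of how this structure is used (see do_regset_call()
 * below): a regset ->get() entry point such as gpregs_get() packages
 * its pos/count/buffer arguments into a regset_getset, arranges for
 * the target's frame to be unwound, and lets the worker advance pos
 * and decrement count as it copies each block of registers, e.g.:
 *
 *	struct regset_getset dst = {
 *		.target = target, .regset = regset,
 *		.pos = 0, .count = sizeof(elf_gregset_t),
 *		.u.get = { .kbuf = kbuf, .ubuf = ubuf }, .ret = 0,
 *	};
 *	unw_init_running(do_gpregs_get, &dst);	// when target == current
 */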

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
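
/*
 * Example of the dispatch above, assuming ELF_GR_OFFSET(i) == i * 8
 * as the addr/8 arithmetic implies: a request for r9 arrives with
 * addr == 72, falls into the r8..r11 range, and resolves to
 * ptr == (void *)&pt->r8 + 8, i.e., the r9 slot of pt_regs.  r4-r7
 * are preserved (callee-saved) registers, so they are not in pt_regs
 * at all and must go through the unwinder instead.
 */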

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						      &dst->u.get.kbuf,
						      &dst->u.get.ubuf,
						      0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						       &dst->u.set.kbuf,
						       &dst->u.set.ubuf,
						       0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						      &dst->u.get.kbuf,
						      &dst->u.get.ubuf,
						      0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						       &dst->u.set.kbuf,
						       &dst->u.set.ubuf,
						       0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}
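
/*
 * Partial-register example for the merge logic above: elf_fpreg_t is
 * 16 bytes, so a copyin starting at byte offset ELF_FP_OFFSET(3) + 8
 * (start & 0xF == 8) supplies only the high half of fr3.  The code
 * therefore fetches the current fr3 and keeps its low half
 * (u.bits[0]) in tmp before rounding start down, and symmetrically
 * keeps the high half (u.bits[1]) of the register at a ragged end,
 * so that unw_set_fr() always writes a complete 16-byte value.
 */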

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				 .pos = pos, .count = count,
				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				 .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}
1839
1840static int
1841gpregs_get(struct task_struct *target,
1842	   const struct user_regset *regset,
1843	   unsigned int pos, unsigned int count,
1844	   void *kbuf, void __user *ubuf)
1845{
1846	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1847		kbuf, ubuf);
1848}
1849
1850static int gpregs_set(struct task_struct *target,
1851		const struct user_regset *regset,
1852		unsigned int pos, unsigned int count,
1853		const void *kbuf, const void __user *ubuf)
1854{
1855	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1856		kbuf, ubuf);
1857}
1858
1859static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1860{
1861	do_sync_rbs(info, ia64_sync_user_rbs);
1862}
1863
/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	/* If TIF_RESTORE_RSE was already set, a writeback is pending. */
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

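/*
 * Note (explanatory, added): fr0-fr31 always have live values in the
 * task's pt_regs/switch_stack, so at least 32 registers are reported
 * active.  The high partition fr32-fr127 is saved lazily into
 * thread.fph and is only meaningful once IA64_THREAD_FPH_VALID is
 * set; until then, a core dump may truncate the fpreg note to the
 * first 32 registers.
 */
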
static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
					"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

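	/*
	 * Example (illustrative, added): a PTRACE_PEEKUSER at
	 * PT_F32 + 8 falls in the PT_F32 ... PT_F127 + 15 range above
	 * and yields pos = ELF_FP_OFFSET(32) + 8, i.e. the high half
	 * of fr32 in the fpreg regset; the 8-byte access then goes
	 * through fpregs_get()/fpregs_set() above.
	 */
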
	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/*
	 * Access debug registers.  They come in (address, mask) pairs
	 * ibr[0..7]/dbr[0..7]; each PT_IBR/PT_DBR slot is 8 bytes, so
	 * the register index is the byte offset divided by 8.
	 */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check whether the debug registers are in use by perfmon.
	 * This test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but before we
	 * start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes are
	 * trying to modify the debug registers for system-wide
	 * monitoring sessions.
	 *
	 * We also include read accesses here, because they may cause
	 * the PMU-installed debug register state (dbr[], ibr[]) to be
	 * reset.  The two arrays are also used by perfmon, but
	 * perfmon does not use IA64_THREAD_DBG_VALID; the registers
	 * are restored by the PMU context-switch code instead.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
				sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
				sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/*
		 * Don't let the user set kernel-level breakpoints:
		 * clear the plm0-plm2 bits (bits 56-58) in the
		 * odd-numbered (mask) register, so the breakpoint can
		 * only fire at user level (pl 3).
		 */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

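/*
 * Illustrative (hypothetical) userspace counterpart: a tracer reaches
 * access_uarea() by peeking or poking the user area, passing one of
 * the PT_* offsets from <asm/ptrace_offsets.h> as the address:
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 *
 * An unaligned address or one in a rejected hole makes access_uarea()
 * return -1, which the ptrace layer reports to the tracer as -EIO.
 */
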
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

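/*
 * Note (explanatory, added): this view is what the generic ELF core
 * dumper walks to emit the NT_PRSTATUS and NT_PRFPREG notes, and, on
 * kernels that support it, what PTRACE_GETREGSET/PTRACE_SETREGSET
 * consult.  A hypothetical debugger-side fetch would look like
 *
 *	struct iovec iov = {
 *		.iov_base = &gregs, .iov_len = sizeof(gregs),
 *	};
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */
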
struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	/*
	 * The kernel register backing store sits at IA64_RBS_OFFSET
	 * above the task structure; pt->loadrs holds the RSC.loadrs
	 * byte count shifted left by 16, so ">> 19" converts it into
	 * a number of dirty 8-byte slots.
	 */
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	/* cfm & 0x7f is the frame's size-of-frame (sof) field. */
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		/* unfetched arguments are reported as zero */
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}

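/*
 * Sketch of the assumed caller (see <asm/syscall.h>): the generic
 * syscall-tracing layer fetches or rewrites syscall arguments through
 * thin wrappers along these lines
 *
 *	static inline void syscall_get_arguments(struct task_struct *task,
 *						 struct pt_regs *regs,
 *						 unsigned int i, unsigned int n,
 *						 unsigned long *args)
 *	{
 *		BUG_ON(i + n > 6);
 *		ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
 *	}
 *
 * with rw == 1 used by the corresponding syscall_set_arguments().
 */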