/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

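	/*
	 * For a 32-bit debuggee, si_errno identifies which debug register
	 * fired: breakpoint slot i is reported as (i << 1) + 1 and
	 * watchpoint slot i as -((i << 1) + 1), matching the AArch32
	 * PTRACE_GETHBPREGS register numbering.
	 */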
	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

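/*
 * Called when a new thread is created: the child does not inherit the
 * parent's hardware breakpoints, so start it with an empty debug_info
 * and let the tracer re-install any breakpoints it needs.
 */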
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

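	/*
	 * Pack the result in the dbg_info format expected by userspace:
	 * debug architecture version in bits [15:8], number of available
	 * slots in bits [7:0].
	 */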
	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

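/*
 * These regsets mirror struct user_hwdebug_state: a u32 resource-info word
 * and a u32 of padding, followed by one { u64 addr; u32 ctrl; u32 pad; }
 * entry per breakpoint/watchpoint slot.
 */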
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno, ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

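	/*
	 * For a compat task, AArch32 r0-r14 live in regs[0]-regs[14] of the
	 * saved pt_regs; register numbers 15, 16 and 17 map onto pc,
	 * pstate (cpsr) and orig_x0 (orig_r0) respectively.
	 */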
	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

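	/*
	 * AArch64 splits the AArch32 FPSCR into separate FPSR (status) and
	 * FPCR (control) registers, so rebuild the fpscr value from the two
	 * masked halves.
	 */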
	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t __user *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t __user *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

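	/*
	 * copy_regset_from_user() expects a user-space pointer, so switch to
	 * KERNEL_DS temporarily to let it read from the on-stack val.
	 */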
	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_struct
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
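/*
 * For example, AArch32 register numbers 1 and 2 (breakpoint 0 address and
 * control) map to index 0 of hbp_break[], and numbers -1 and -2 (watchpoint
 * 0 address and control) map to index 0 of hbp_watch[].
 */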
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

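	/*
	 * AArch32 PTRACE_GETHBPREGS register 0 layout: debug architecture in
	 * bits [31:24], maximum watchpoint length in bits [23:16], number of
	 * watchpoints in bits [15:8] and number of breakpoints in bits [7:0].
	 */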
	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit: it is temporarily set to
	 * PTRACE_SYSCALL_ENTER (0) or PTRACE_SYSCALL_EXIT (1) while the
	 * tracer is notified, then restored.
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}