This source file includes the following definitions:
- get_stack_long
- get_fpu_long
- put_stack_long
- put_fpu_long
- user_enable_single_step
- user_disable_single_step
- genregs_get
- genregs_set
- fpregs_get
- fpregs_set
- fpregs_active
- task_user_regset_view
- arch_ptrace
- sh64_ptrace
- do_syscall_trace_enter
- do_syscall_trace_leave
- do_single_step
- BUILD_TRAP_HANDLER
- ptrace_disable
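
As a quick illustration of how these entry points are reached, a user-space tracer drives them through the ptrace(2) system call. The sketch below is illustrative only: it uses the glibc ptrace() wrapper (whose PTRACE_PEEKUSER request corresponds to the PTRACE_PEEKUSR case in arch_ptrace below), assumes pc sits at offset 0 of the saved register area as get_stack_long/genregs_get indicate, and omits error handling.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: request tracing, then stop at the exec trap. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	/* Wait for the child to stop after execl(). */
	waitpid(child, NULL, 0);

	/*
	 * PTRACE_PEEKUSER reads one word of the saved register area;
	 * offset 0 lands on pc (see the PTRACE_PEEKUSR case below).
	 */
	long pc = ptrace(PTRACE_PEEKUSER, child, (void *)0, NULL);
	printf("child pc: %#lx\n", pc);

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	return 0;
}

The kernel-side implementation follows.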
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Bits of SR which user space may not modify: when SR is written via
 * PTRACE_POKEUSR these bits are preserved from the current value.
 */
#define SR_MASK (0xffff8cfd)

/* Read one word from the saved user-register area of the traced task. */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)(task->thread.uregs);
	stack += offset;
	return (*((int *)stack));
}

/*
 * Read one word of FPU state. If the task never used the FPU, return
 * default values; otherwise make sure any live FPU context is saved to
 * task->thread.xstate before reading from it.
 */
static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
	unsigned long tmp;
	struct pt_regs *regs;
	regs = (struct pt_regs *)((unsigned char *)task + THREAD_SIZE) - 1;

	if (!tsk_used_math(task)) {
		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
			tmp = FPSCR_INIT;
		} else {
			tmp = 0xffffffffUL;
		}
		return tmp;
	}

	if (last_task_used_math == task) {
		enable_fpu();
		save_fpu(task);
		disable_fpu();
		last_task_used_math = 0;
		regs->sr |= SR_FD;
	}

	tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
	return tmp;
}

/* Write one word into the saved user-register area of the traced task. */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)(task->thread.uregs);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

/* Write one word of FPU state, saving any live FPU context first. */
static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
	struct pt_regs *regs;

	regs = (struct pt_regs *)((unsigned char *)task + THREAD_SIZE) - 1;

	if (!tsk_used_math(task)) {
		init_fpu(task);
	} else if (last_task_used_math == task) {
		enable_fpu();
		save_fpu(task);
		disable_fpu();
		last_task_used_math = 0;
		regs->sr |= SR_FD;
	}

	((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = child->thread.uregs;

	regs->sr |= SR_SSTEP;

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = child->thread.uregs;

	regs->sr &= ~SR_SSTEP;

	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* PC, SR, syscall_nr */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->pc,
				  0, 3 * sizeof(unsigned long long));

	/* R0 -> R63 */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  regs->regs,
					  offsetof(struct pt_regs, regs[0]),
					  63 * sizeof(unsigned long long));

	/* TR0 -> TR7 */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  regs->tregs,
					  offsetof(struct pt_regs, tregs[0]),
					  8 * sizeof(unsigned long long));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* PC, SR, syscall_nr */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->pc,
				 0, 3 * sizeof(unsigned long long));

	/* R0 -> R63 */
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 regs->regs,
					 offsetof(struct pt_regs, regs[0]),
					 63 * sizeof(unsigned long long));

	/* TR0 -> TR7 */
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 regs->tregs,
					 offsetof(struct pt_regs, tregs[0]),
					 8 * sizeof(unsigned long long));

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->hardfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->hardfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(syscall_nr),
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REGS_OFFSET_NAME(16),
	REGS_OFFSET_NAME(17),
	REGS_OFFSET_NAME(18),
	REGS_OFFSET_NAME(19),
	REGS_OFFSET_NAME(20),
	REGS_OFFSET_NAME(21),
	REGS_OFFSET_NAME(22),
	REGS_OFFSET_NAME(23),
	REGS_OFFSET_NAME(24),
	REGS_OFFSET_NAME(25),
	REGS_OFFSET_NAME(26),
	REGS_OFFSET_NAME(27),
	REGS_OFFSET_NAME(28),
	REGS_OFFSET_NAME(29),
	REGS_OFFSET_NAME(30),
	REGS_OFFSET_NAME(31),
	REGS_OFFSET_NAME(32),
	REGS_OFFSET_NAME(33),
	REGS_OFFSET_NAME(34),
	REGS_OFFSET_NAME(35),
	REGS_OFFSET_NAME(36),
	REGS_OFFSET_NAME(37),
	REGS_OFFSET_NAME(38),
	REGS_OFFSET_NAME(39),
	REGS_OFFSET_NAME(40),
	REGS_OFFSET_NAME(41),
	REGS_OFFSET_NAME(42),
	REGS_OFFSET_NAME(43),
	REGS_OFFSET_NAME(44),
	REGS_OFFSET_NAME(45),
	REGS_OFFSET_NAME(46),
	REGS_OFFSET_NAME(47),
	REGS_OFFSET_NAME(48),
	REGS_OFFSET_NAME(49),
	REGS_OFFSET_NAME(50),
	REGS_OFFSET_NAME(51),
	REGS_OFFSET_NAME(52),
	REGS_OFFSET_NAME(53),
	REGS_OFFSET_NAME(54),
	REGS_OFFSET_NAME(55),
	REGS_OFFSET_NAME(56),
	REGS_OFFSET_NAME(57),
	REGS_OFFSET_NAME(58),
	REGS_OFFSET_NAME(59),
	REGS_OFFSET_NAME(60),
	REGS_OFFSET_NAME(61),
	REGS_OFFSET_NAME(62),
	REGS_OFFSET_NAME(63),
	TREGS_OFFSET_NAME(0),
	TREGS_OFFSET_NAME(1),
	TREGS_OFFSET_NAME(2),
	TREGS_OFFSET_NAME(3),
	TREGS_OFFSET_NAME(4),
	TREGS_OFFSET_NAME(5),
	TREGS_OFFSET_NAME(6),
	TREGS_OFFSET_NAME(7),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Layout of the general regset, matching genregs_get()/genregs_set():
	 *	PC, SR, SYSCALL,
	 *	R0 --> R63,
	 *	TR0 --> TR7,
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long long),
		.align = sizeof(long long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) /
		     sizeof(long long),
		.size = sizeof(long long),
		.align = sizeof(long long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,
	},
#endif
};

static const struct user_regset_view user_sh64_native_view = {
	.name = "sh64",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh64_native_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if ((addr >= offsetof(struct user, fpu)) &&
			 (addr < offsetof(struct user, u_fpvalid))) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			tmp = get_fpu_long(child, index);
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			tmp = !!tsk_used_math(child);
		} else {
			break;
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* write the word at location addr in the USER area. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs)) {
			/* Writes to the upper 32 bits of SR are ignored. */
			if (addr == offsetof(struct pt_regs, sr) + 4) {
				ret = 0;
				break;
			}

			/* For the lower 32 bits of SR, only bits outside
			 * SR_MASK may be changed. */
			if (addr == offsetof(struct pt_regs, sr)) {
				long cursr = get_stack_long(child, addr);
				data &= ~(SR_MASK);
				data |= (cursr & SR_MASK);
			}
			ret = put_stack_long(child, addr, data);
		} else if ((addr >= offsetof(struct user, fpu)) &&
			   (addr < offsetof(struct user, u_fpvalid))) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			ret = put_fpu_long(child, index, data);
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh64_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh64_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh64_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh64_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage int sh64_ptrace(long request, long pid,
			   unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
	static unsigned long first_call;

	if (!test_and_set_bit(0, &first_call)) {
		/*
		 * On the first ptrace call, clear WPC.DBRMODE so that debug
		 * events are delivered to the kernel's own handlers rather
		 * than to an external debug agent, permitting native
		 * debugging.
		 */
		printk("DBRMODE set to 0 to permit native debugging\n");
		poke_real_address_q(WPC_DBRMODE, 0);
	}

	return sys_ptrace(request, pid, addr, data);
}

asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
{
	long long ret = 0;

	secure_computing_strict(regs->regs[9]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen; return -1
		 * instead of the syscall number so the caller can deny it.
		 */
		ret = -1LL;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[9]);

	audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
			    regs->regs[4], regs->regs[5]);

	return ret ?: regs->regs[9];
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[9]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}

asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
	/*
	 * Handle a single-step debug exception: clear SR.SSTEP so execution
	 * resumes normally, and let the tracer see a SIGTRAP.
	 */
	local_irq_enable();
	regs->sr &= ~SR_SSTEP;
	force_sig(SIGTRAP);
}

BUILD_TRAP_HANDLER(breakpoint)
{
	TRAP_HANDLER_DECL;

	/* Raise SIGTRAP for the debugger and step the PC past the 4-byte
	 * trapping instruction. */
	local_irq_enable();
	force_sig(SIGTRAP);
	regs->pc += 4;
}

/*
 * Called by kernel/ptrace.c when detaching: make sure single stepping is
 * disabled.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}