/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * The parameter save area on the stack is used to store arguments being
 * passed to the callee function and is located at a fixed offset from the
 * stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24	/* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48	/* bytes */
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.
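 * The lookup is a linear search of regoffset_table.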
 * If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif

static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
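 * MSR, TRAP and DSCR writes are routed through their helpers above;
 * registers beyond PT_MAX_PUT_REG are rejected with -EIO.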
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);
	if (regno == PT_DSCR)
		return set_user_dscr(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}

static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32][0]));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32][0]));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}

#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a
 * bi-arch (combined 32- and 64-bit) gdb.
 */

static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}

static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}

static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
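		 * The rest of the quadword supplied by the caller is ignored.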
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently, to set and get all the VSX state, you need to call the fp and
 * VMX calls as well.  This only gets/sets the lower 32 128-bit VSX registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	for (i = 0; i < 32 ; i++)
		target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *	u32 evr[32];
 *	u64 acc;
 *	u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */

/*
 * These are our native regset flavors.
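 * They back both the ELF core dump notes and the PTRACE_GETREGS-style
 * requests handled in arch_ptrace() below.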
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
};

static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_PPC64
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	const unsigned long *regs = &target->thread.regs->gpr[0];
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;
	int i;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.
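		 * (the syscall entry path saves only the volatile GPRs)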
		 * Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
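 * They are used for 32-bit tasks running on a 64-bit kernel.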
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}

void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.debug.dbcr0 &= ~DBCR0_IC;
		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABRs at the moment.
	 * For embedded processors we support one DAC and no IACs at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 * It was assumed, on previous implementations, that 3 bits were
	 * passed together with the data address, fitting the design of the
	 * DABR register, as follows:
	 *
	 * bit 0: Read flag
	 * bit 1: Write flag
	 * bit 2: Breakpoint translation
	 *
	 * Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret)
			return ret;
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 * address, but we will assume only the mode bits will be passed,
	 * so as not to cause alignment restrictions for DAC-based processors.
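	 * (the DAC register holds only the data address; the read/write
	 * enables are kept in the DBCR via dbcr_dac())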
	 */

	/* DACs hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
			       struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
			    PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
			    PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.
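		 * (a single IAC register, since this is an exact-address breakpoint)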
		 * If possible leave a pair free in
		 * case a range is needed later.
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}

static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}

static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable <<
				  DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}

static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.
		 * Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value.
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address.
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */

static long ppc_set_hwdebug(struct task_struct *child,
			    struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints.
	 * We can support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}

static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area.
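	 * (addr is a byte offset: the pt_regs fields come first, followed by
	 * the floating point registers and fpscr)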
	 */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (cpu_has_feature(CPU_FTR_DAWR))
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (!access_ok(VERIFY_WRITE, datavp,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user(datavp, &dbginfo,
				     sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, datavp,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info, datavp,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRs at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	user_exit();

	secure_computing_strict(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->gpr[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	return ret ?: regs->gpr[0];
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	user_enter();
}