/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
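
/*
 * Illustrative userspace counterpart of the rule enforced by
 * ptrace_check_attach() (a hedged sketch, not kernel code; the helper
 * name peek_word() is made up): after PTRACE_ATTACH the tracer must
 * consume the stop notification with waitpid() before issuing further
 * requests, otherwise the tracee is not yet frozen in TASK_TRACED and
 * the kernel answers -ESRCH.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <errno.h>
 *
 *	static long peek_word(pid_t pid, void *addr)
 *	{
 *		int status;
 *		long word;
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (waitpid(pid, &status, 0) == -1)	// wait for the stop report
 *			return -1;
 *		errno = 0;
 *		word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *		ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *		return (word == -1 && errno) ? -1 : word;
 *	}
 */
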
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
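
/*
 * The check above also gates /proc: for example, /proc/<pid>/mem is only
 * readable by a task that passes a PTRACE_MODE_ATTACH-style check on the
 * target.  A hedged userspace sketch (the helper name read_mem() is made
 * up, and the attach/stop dance around the read reflects common practice
 * rather than a hard requirement on every kernel version):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static ssize_t read_mem(pid_t pid, off_t addr, void *buf, size_t len)
 *	{
 *		char path[64];
 *		int fd, status;
 *		ssize_t n = -1;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		waitpid(pid, &status, 0);
 *		fd = open(path, O_RDONLY);	// EPERM/EACCES if the access check fails
 *		if (fd >= 0) {
 *			n = pread(fd, buf, len, addr);
 *			close(fd);
 *		}
 *		ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *		return n;
 *	}
 */
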
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
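
/*
 * The usual caller of PTRACE_TRACEME is a debugger launching its own
 * child: the child asks to be traced, then exec()s, and the parent sees
 * a stop before the new program runs.  A hedged userspace sketch (the
 * helper name spawn_traced() is made up):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	static pid_t spawn_traced(char *const argv[])
 *	{
 *		int status;
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			execvp(argv[0], argv);
 *			_exit(127);			// exec failed
 *		}
 *		if (pid > 0)
 *			waitpid(pid, &status, 0);	// child stopped at exec
 *		return pid;
 *	}
 */
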
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
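
/*
 * The block copy above mirrors what tracers do one word at a time with
 * PTRACE_PEEKDATA/PTRACE_POKEDATA.  A hedged sketch of the classic
 * read-modify-write used to plant a software breakpoint (the helper
 * name set_breakpoint() is made up and the 0xcc opcode is x86-specific;
 * other architectures use different trap instructions):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <errno.h>
 *
 *	static int set_breakpoint(pid_t pid, unsigned long addr, long *saved)
 *	{
 *		long word;
 *
 *		errno = 0;
 *		word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *		if (word == -1 && errno)
 *			return -1;
 *		*saved = word;				// keep the original word
 *		word = (word & ~0xffL) | 0xcc;		// int3 in the low byte
 *		return ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)word) ? -1 : 0;
 *	}
 */
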
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(is_compat_task())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
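
/*
 * Hedged userspace sketch of PTRACE_PEEKSIGINFO (the helper name
 * peek_pending() is made up; struct __ptrace_peeksiginfo_args is glibc's
 * name for the kernel's struct ptrace_peeksiginfo_args above): @addr
 * carries the arguments, @data points to a buffer that receives up to
 * nr siginfo_t records, and the return value is the number of records
 * actually copied.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <signal.h>
 *
 *	static long peek_pending(pid_t pid, siginfo_t *buf, int nr)
 *	{
 *		struct __ptrace_peeksiginfo_args args = {
 *			.off	= 0,	// start at the head of the queue
 *			.flags	= 0,	// per-thread queue, not the shared one
 *			.nr	= nr,
 *		};
 *
 *		return ptrace(PTRACE_PEEKSIGINFO, pid, &args, buf);
 *	}
 */
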
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
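
/*
 * Hedged userspace sketch tying PTRACE_SYSCALL (served by ptrace_resume()
 * above) to PTRACE_GETREGSET (served by ptrace_regset()): resume the
 * stopped tracee until its next syscall stop, then fetch the
 * general-purpose registers as an NT_PRSTATUS regset through an iovec.
 * The helper name next_syscall_regs() is made up, and struct
 * user_regs_struct as well as the meaning of individual registers are
 * architecture-specific.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <sys/wait.h>
 *	#include <elf.h>
 *
 *	static int next_syscall_regs(pid_t pid, struct user_regs_struct *regs)
 *	{
 *		struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };
 *		int status;
 *
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
 *			return -1;
 *		// the kernel trims iov.iov_len to the regset size on return
 *		return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	}
 */
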
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
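
/*
 * Hedged userspace sketch of the PTRACE_SEIZE / PTRACE_INTERRUPT /
 * PTRACE_LISTEN flow handled above (the helper name seize_and_interrupt()
 * is made up, and PTRACE_SEIZE/PTRACE_INTERRUPT/PTRACE_LISTEN/
 * PTRACE_EVENT_STOP are assumed to come from a reasonably recent
 * <sys/ptrace.h>): SEIZE attaches without stopping the tracee, INTERRUPT
 * forces a trap so it can be inspected, and LISTEN lets a group-stopped
 * tracee stay stopped while the tracer keeps receiving notifications.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	static int seize_and_interrupt(pid_t pid)
 *	{
 *		int status;
 *
 *		// SEIZE attaches without sending SIGSTOP or trapping the tracee
 *		if (ptrace(PTRACE_SEIZE, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) == -1)
 *			return -1;
 *		if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
 *			return -1;
 *		if ((status >> 16) == PTRACE_EVENT_STOP && WSTOPSIG(status) != SIGTRAP)
 *			// group-stop: observe without resuming the tracee
 *			return ptrace(PTRACE_LISTEN, pid, NULL, NULL) ? -1 : 0;
 *		return ptrace(PTRACE_CONT, pid, NULL, NULL) ? -1 : 0;
 *	}
 */
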
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */