#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */
#include <linux/err.h>			/* for IS_ERR_VALUE */
#include <linux/bug.h>			/* For BUG_ON.  */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple: when a task is running it owns its own
 * task->ptrace flags; when a task is stopped the ptracer owns
 * task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP	0x00000004	/* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
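/*
 * Illustrative sketch, not part of this header: each PT_* event flag
 * above is the corresponding PTRACE_O_* option bit from
 * <uapi/linux/ptrace.h> shifted left by PT_OPT_FLAG_SHIFT (PT_EXITKILL
 * and PT_SUSPEND_SECCOMP spell this out directly).  A user-supplied
 * option mask can therefore be folded into task->ptrace with a single
 * shift, roughly:
 *
 *	unsigned long data = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK;
 *
 *	task->ptrace |= data << PT_OPT_FLAG_SHIFT;
 *	 ... equivalent to setting PT_TRACESYSGOOD | PT_TRACE_FORK ...
 */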
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04
#define PTRACE_MODE_FSCREDS	0x08
#define PTRACE_MODE_REALCREDS	0x10

/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)

/**
 * ptrace_may_access - check whether the caller is permitted to access
 * a target task.
 * @task: target task
 * @mode: selects type of access and caller credentials
 *
 * Returns true on success, false on denial.
 *
 * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
 * be set in @mode to specify whether the access was requested through
 * a filesystem syscall (should use effective capabilities and fsuid
 * of the caller) or through an explicit syscall such as
 * process_vm_writev or ptrace (and should use the real credentials).
 */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with
 * task_lock() held on @task, still held from when check_unsafe_exec()
 * was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}
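/*
 * Illustrative user-space sketch, not part of this header: the
 * (event << 8) | SIGTRAP value passed to ptrace_notify() above is what
 * a tracer sees in the waitpid() stop status, using the decoding idiom
 * documented in ptrace(2).  Assuming the tracer has already enabled the
 * event with PTRACE_SETOPTIONS (here PTRACE_O_TRACEEXEC):
 *
 *	int status;
 *
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *		... the tracee is stopped at a PTRACE_EVENT_EXEC stop,
 *		    and PTRACE_GETEVENTMSG retrieves @message ...
 */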
/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @pid: process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below
	 * and when we acquire tasklist_lock in ptrace_stop().  If this
	 * happens, the ptracer will get a bogus pid from
	 * PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered out
 * by some other means (e.g., in user-level, by passing an extra argument to
 * the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif

#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on if the retval
 * is an error value.  On some systems like ia64 and powerpc they have different
 * indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif
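/*
 * Illustrative note, not something this header defines: with the
 * fallback definition above, IS_ERR_VALUE() from <linux/err.h> treats
 * only the topmost MAX_ERRNO (4095) values of unsigned long, i.e.
 * return values -4095..-1, as errnos.  So, roughly:
 *
 *	regs_return_value(regs) == 0        ->  is_syscall_success(regs) != 0
 *	regs_return_value(regs) == -ENOENT  ->  is_syscall_success(regs) == 0
 *
 * which is exactly why a syscall that legitimately returns a value in
 * that range needs force_successful_syscall_return() on architectures
 * with a separate error flag.
 */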
/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */
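/*
 * Illustrative sketch, not part of this header: an architecture with
 * hardware single-step support opts in from its <asm/ptrace.h> roughly
 * like this (x86 does so, for example):
 *
 *	#define arch_has_single_step()	(1)
 *
 * That selects the extern declarations in the #else branch above
 * instead of the BUG() stubs, and the arch then implements
 * user_enable_single_step()/user_disable_single_step() in its own C
 * code (on x86, arch/x86/kernel/step.c).
 */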
#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				     struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
					    struct pt_regs *regs,
					    siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

#ifndef ptrace_signal_deliver
#define ptrace_signal_deliver() ((void)0)
#endif

/*
 * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#endif