/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
 * Copyright (C) 2008 Helge Deller <deller@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* make sure the trap bits are not set */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

/*
 * The following functions are called by ptrace_resume() when
 * enabling or disabling single/block tracing.
 */
void user_disable_single_step(struct task_struct *task)
{
	ptrace_disable(task);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	set_tsk_thread_flag(task, TIF_SINGLESTEP);

	if (pa_psw(task)->n) {
		struct siginfo si;

		/* Nullified, just crank over the queue. */
		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
		pa_psw(task)->n = 0;
		pa_psw(task)->x = 0;
		pa_psw(task)->y = 0;
		pa_psw(task)->z = 0;
		pa_psw(task)->b = 0;
		ptrace_disable(task);
		/* Don't wake up the task, but let the
		   parent know something happened. */
		si.si_code = TRAP_TRACE;
		si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
		si.si_signo = SIGTRAP;
		si.si_errno = 0;
		force_sig_info(SIGTRAP, &si, task);
		/* notify_parent(task, SIGCHLD); */
		return;
	}

	/* Enable recovery counter traps.  The recovery counter
	 * itself will be set to zero on a task switch.  If the
	 * task is suspended on a syscall then the syscall return
	 * path will overwrite the recovery counter with a suitable
	 * value such that it traps once back in user space.  We
	 * also disable interrupts in the task's PSW here, to avoid
	 * interrupts while the recovery counter is decrementing.
	 */
	pa_psw(task)->r = 1;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

void user_enable_block_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	set_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* Enable taken branch trap. */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 1;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

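/*
 * The "USER area" accessed by PTRACE_PEEKUSR/PTRACE_POKEUSR below is
 * simply the traced task's saved struct pt_regs: addr is a byte offset
 * into it, bounds-checked against sizeof(struct pt_regs), and the PT_*
 * constants (PT_PSW, PT_GR1..PT_GR31, PT_IAOQ0, PT_SAR, ...) name the
 * individual slots.
 *
 * Illustrative userspace use (a sketch, not part of this kernel code;
 * assumes the tracee is already attached and stopped):
 *
 *	errno = 0;
 *	long sar = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_SAR, NULL);
 */
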
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long tmp;
	long ret = -EIO;

	switch (request) {

	/* Read the word at location addr in the USER area.  For ptraced
	   processes, the kernel saves all regs on a syscall. */
	case PTRACE_PEEKUSR:
		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (unsigned long __user *) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			data &= USER_PSW_BITS;
			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
			task_regs(child)->gr[0] |= data;
			ret = 0;
			break;
		}

		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
				addr == PT_SAR) {
			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
			ret = 0;
		}
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}


#ifdef CONFIG_COMPAT

/* This function is needed to translate 32-bit pt_regs offsets into
 * 64-bit pt_regs offsets.  For example, a 32-bit gdb under a 64-bit kernel
 * will request offset 12 if it wants gr3, but the lower 32 bits of
 * the 64-bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
 * This code relies on a 32-bit pt_regs being comprised of 32-bit values
 * except for the fp registers which (a) are 64 bits, and (b) follow
 * the gr registers at the start of pt_regs.  The 32-bit pt_regs should
 * be half the size of the 64-bit pt_regs, plus 32*4 to allow for fr[]
 * being 64 bit in both cases.
 */

static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
	if (offset < 0)
		return sizeof(struct pt_regs);
	else if (offset <= 32*4)	/* gr[0..31] */
		return offset * 2 + 4;
	else if (offset <= 32*4+32*8)	/* gr[0..31] + fr[0..31] */
		return offset + 32*4;
	else if (offset < sizeof(struct pt_regs)/2 + 32*4)
		return offset * 2 + 4 - 32*8;
	else
		return sizeof(struct pt_regs);
}

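/*
 * Why translate_usr_offset() adds 4: parisc is big-endian, so the low
 * 32 bits of each 64-bit general register slot live 4 bytes into the
 * slot.  The compat PTRACE_POKEUSR path below relies on the same layout
 * when it zeroes the word at addr - 4 (the high half) before storing
 * the new low word.
 */
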
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	compat_uint_t tmp;
	long ret = -EIO;

	switch (request) {

	case PTRACE_PEEKUSR:
		if (addr & (sizeof(compat_uint_t)-1))
			break;
		addr = translate_usr_offset(addr);
		if (addr >= sizeof(struct pt_regs))
			break;

		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Since PT_PSW==0, it is valid for 32-bit processes
			 * under 64-bit kernels as well.
			 */
			ret = arch_ptrace(child, request, addr, data);
		} else {
			if (addr & (sizeof(compat_uint_t)-1))
				break;
			addr = translate_usr_offset(addr);
			if (addr >= sizeof(struct pt_regs))
				break;
			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
				/* Special case, fp regs are 64 bits anyway */
				*(__u64 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
					addr == PT_SAR+4) {
				/* Zero the top 32 bits */
				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
		}
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif

/*
 * Syscall tracing hooks, called from the syscall path.  On parisc the
 * syscall number is in gr[20] and the first four arguments are in
 * gr[26..23].  do_syscall_trace_enter() returns the (possibly rewritten)
 * syscall number; -1 tells the caller to skip the syscall and its
 * restart handling.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	/* Do the secure computing check first. */
	secure_computing_strict(regs->gr[20]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number.  Skip
		 * the system call and the system call restart handling.
		 */
		regs->gr[20] = -1UL;
		goto out;
	}

#ifdef CONFIG_64BIT
	if (!is_compat_task())
		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
				    regs->gr[24], regs->gr[23]);
	else
#endif
		audit_syscall_entry(regs->gr[20] & 0xffffffff,
			regs->gr[26] & 0xffffffff,
			regs->gr[25] & 0xffffffff,
			regs->gr[24] & 0xffffffff,
			regs->gr[23] & 0xffffffff);

out:
	return regs->gr[20];
}

void do_syscall_trace_exit(struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
		test_thread_flag(TIF_BLOCKSTEP);

	audit_syscall_exit(regs);

	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, stepping);
}
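
/*
 * Illustrative tracer loop exercising the hooks above (a userspace
 * sketch, not part of this kernel code; signal and exit handling
 * omitted):
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);  /* run to entry */
 *		waitpid(pid, &status, 0);
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);  /* run to exit */
 *		waitpid(pid, &status, 0);
 *	}
 *
 * Each syscall stop in that loop is reported through
 * do_syscall_trace_enter() and do_syscall_trace_exit() above.
 */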