/*
 * arch/tile/kernel/kprobes.c
 * Kprobes on TILE-Gx
 *
 * Some portions copied from the MIPS version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#include <arch/opcode.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;

/*
 * Check whether instruction is branch or jump, or if executing it
 * has different results depending on where it is executed (e.g. lnk).
 */
static int __kprobes insn_has_control(kprobe_opcode_t insn)
{
	if (get_Mode(insn) != 0) {	/* Y-format bundle */
		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
			return 0;

		switch (get_UnaryOpcodeExtension_Y1(insn)) {
		case JALRP_UNARY_OPCODE_Y1:
		case JALR_UNARY_OPCODE_Y1:
		case JRP_UNARY_OPCODE_Y1:
		case JR_UNARY_OPCODE_Y1:
		case LNK_UNARY_OPCODE_Y1:
			return 1;
		default:
			return 0;
		}
	}

	switch (get_Opcode_X1(insn)) {
	case BRANCH_OPCODE_X1:	/* branch instructions */
	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
		return 1;

	case RRR_0_OPCODE_X1:	/* other jump instructions */
		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
			return 0;
		switch (get_UnaryOpcodeExtension_X1(insn)) {
		case JALRP_UNARY_OPCODE_X1:
		case JALR_UNARY_OPCODE_X1:
		case JRP_UNARY_OPCODE_X1:
		case JR_UNARY_OPCODE_X1:
		case LNK_UNARY_OPCODE_X1:
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (addr & (sizeof(kprobe_opcode_t) - 1))
		return -EINVAL;

	if (insn_has_control(*p->addr)) {
		pr_notice("Kprobes for control instructions are not supported\n");
		return -EINVAL;
	}

	/* insn: must be on special executable page on tile. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */
	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

	return 0;
}
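
/*
 * Illustrative sketch, not part of the original code: after a
 * successful arch_prepare_kprobe(), the out-of-line slot holds
 *
 *	p->ainsn.insn[0] = <bundle copied from p->addr>;
 *	p->ainsn.insn[1] = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
 *
 * so executing the slot runs the displaced bundle and then
 * immediately traps with DIE_SSTEPBP, which post_kprobe_handler()
 * below recognizes as the end of the single step.
 */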

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr_wr;

	/* Operate on the writable kernel text mapping. */
	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
			       sizeof(breakpoint_insn)))
		pr_err("%s: failed to enable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *kp)
{
	unsigned long addr_wr;

	/* Operate on the writable kernel text mapping. */
	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
			       sizeof(kp->opcode)))
		pr_err("%s: failed to disable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(kp);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_pc = regs->pc;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* Single step inline if the instruction is a break. */
	if (p->opcode == breakpoint_insn ||
	    p->opcode == breakpoint2_insn)
		regs->pc = (unsigned long)p->addr;
	else
		regs->pc = (unsigned long)&p->ainsn.insn[0];
}
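
/*
 * Illustrative flow (added explanation, not in the original source):
 * for a probed non-break bundle the trap sequence is
 *
 *	cpu hits breakpoint_insn at p->addr		-> DIE_BREAK
 *	  kprobe_handler(): run the pre_handler, then point regs->pc
 *	  at the out-of-line slot via prepare_singlestep()
 *	cpu executes the displaced bundle, hits breakpoint2_insn
 *							-> DIE_SSTEPBP
 *	  post_kprobe_handler(): run the post_handler, then resume at
 *	  the bundle following the original probe address
 */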

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)regs->pc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn[0] == breakpoint_insn) {
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != breakpoint_insn) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != breakpoint_insn) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* Handler has already set things up, so skip ss setup. */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction that has been replaced by the breakpoint. To avoid the
 * SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction. The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_pc = kcb->kprobe_saved_pc;
	regs->pc = orig_pc + 8;
}
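
/*
 * Note (added explanation, not in the original source): resuming at
 * orig_pc + 8 is always correct on TILE-Gx because every bundle is 8
 * bytes wide and arch_prepare_kprobe() rejects control-flow bundles
 * via insn_has_control(), so the displaced bundle can never have
 * redirected the pc itself.
 */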

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the pc back at the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		resume_execution(cur, regs, kcb);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id(). */
		preempt_disable();

		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->sp;

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	asm volatile(
		"bpt\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->pc >= (unsigned long)jprobe_return &&
	    regs->pc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 * - init_kprobes() establishes a probepoint here
 * - When the probed function returns, this probe causes the
 *   handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->lr;

	/* Replace the return address with the trampoline address. */
	regs->lr = (unsigned long)kretprobe_trampoline;
}
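
/*
 * Illustrative flow (added explanation, not in the original source):
 * with the lr hijack above, a kretprobed call behaves roughly as
 *
 *	caller --jal--> func		(lr = real return address)
 *	  arch_prepare_kretprobe(): ri->ret_addr = lr;
 *				    lr = kretprobe_trampoline;
 *	func returns "to" kretprobe_trampoline, which has the
 *	trampoline_p kprobe armed on it	-> DIE_BREAK
 *	  trampoline_probe_handler(): run the user handler, then set
 *	  the pc to ri->ret_addr so execution rejoins the caller.
 */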

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 * - instances are always inserted at the head of the list
	 * - when multiple return probes are registered for the same
	 *   function, the first instance's ret_addr will point to the
	 *   real return address, and all the rest will point to
	 *   kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and that we have re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Propagate any registration failure to the caller. */
	return register_kprobe(&trampoline_p);
}
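
/*
 * Example usage (illustrative sketch, not part of this file): a
 * minimal module that installs a kprobe through the arch hooks above
 * might look like
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %p, lr=%lx\n", p->addr, regs->lr);
 *		return 0;	// let the single-step machinery run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_fork",	// hypothetical target
 *		.pre_handler = pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *
 * Registration fails with -EINVAL here if the target bundle is a
 * control-flow instruction (see insn_has_control() above).
 */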