arch/arc/kernel/kprobes.c


DEFINITIONS

This source file includes the following definitions.
  1. arch_prepare_kprobe
  2. arch_arm_kprobe
  3. arch_disarm_kprobe
  4. arch_remove_kprobe
  5. save_previous_kprobe
  6. restore_previous_kprobe
  7. set_current_kprobe
  8. resume_execution
  9. setup_singlestep
  10. arc_kprobe_handler
  11. arc_post_kprobe_handler
  12. kprobe_fault_handler
  13. kprobe_exceptions_notify
  14. kretprobe_trampoline_holder
  15. arch_prepare_kretprobe
  16. trampoline_probe_handler
  17. arch_init_kprobes
  18. arch_trampoline_kprobe
  19. trap_is_kprobe

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

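/*
 * Space left on the current kernel stack starting at @addr, clamped to
 * MAX_STACK_SIZE (not defined in this file). Note that the macro is not
 * referenced anywhere else in this file.
 */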
#define MIN_STACK_SIZE(addr)    min((unsigned long)MAX_STACK_SIZE, \
                (unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* Reject attempts to probe at an unaligned address */
        if ((unsigned long)p->addr & 0x01)
                return -EINVAL;

        /* Address should not be in exception handling code */

        p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
        p->opcode = *p->addr;

        return 0;
}

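/*
 * Arming plants an UNIMP_S (unimplemented instruction) opcode at the probe
 * address; executing it raises an instruction error exception, which is
 * routed to arc_kprobe_handler() via the DIE_IERR case in
 * kprobe_exceptions_notify() below.
 */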
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = UNIMP_S_INSTRUCTION;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        arch_disarm_kprobe(p);

        /* Can we remove the kprobe in the middle of kprobe handling? */
        if (p->ainsn.t1_addr) {
                *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

                flush_icache_range((unsigned long)p->ainsn.t1_addr,
                                   (unsigned long)p->ainsn.t1_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t1_addr = NULL;
        }

        if (p->ainsn.t2_addr) {
                *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t2_addr = NULL;
        }
}
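
/*
 * For reference, the generic kprobes core drives the four arch hooks above
 * roughly as follows (a sketch only, not part of this file; "kp" and the
 * probed symbol are purely illustrative):
 *
 *      static struct kprobe kp = { .symbol_name = "vfs_read" };
 *
 *      register_kprobe(&kp);    // arch_prepare_kprobe(), then arch_arm_kprobe()
 *      ...
 *      unregister_kprobe(&kp);  // arch_disarm_kprobe(), later arch_remove_kprobe()
 */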

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
                                       struct pt_regs *regs)
{
        /* Remove the trap instructions inserted for single step and
         * restore the original instructions
         */
        if (p->ainsn.t1_addr) {
                *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

                flush_icache_range((unsigned long)p->ainsn.t1_addr,
                                   (unsigned long)p->ainsn.t1_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t1_addr = NULL;
        }

        if (p->ainsn.t2_addr) {
                *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t2_addr = NULL;
        }
}

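/*
 * Single stepping is done in software here: the original instruction is put
 * back at the probe point and TRAP_S 2 breakpoints are planted at the
 * fall-through PC (t1) and, for branches, at the branch target (t2). The
 * resulting trap is fielded by arc_post_kprobe_handler() via DIE_TRAP.
 */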
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long next_pc;
        unsigned long tgt_if_br = 0;
        int is_branch;
        unsigned long bta;

        /* Copy the original opcode back to the kprobe location so the
         * instruction can execute. As a consequence, this kprobe cannot be
         * hit again until handling of the current one is done
         */
        *(p->addr) = p->opcode;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));

        /* Now insert the trap at the location following this instruction for
         * single stepping. If it is a branch, also insert the trap at the
         * possible branch target
         */

        bta = regs->bta;

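        /*
         * STATUS32 bit 6 (the DE flag): the probed instruction sits in a
         * branch delay slot and BTA holds the target of the taken branch.
         */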
        if (regs->status32 & 0x40) {
                /* We are in a delay slot with the branch taken */

                next_pc = bta & ~0x01;

                if (!p->ainsn.is_short) {
                        if (bta & 0x01)
                                regs->blink += 2;
                        else {
                                /* Branch not taken */
                                next_pc += 2;

                                /* next pc is taken from bta after executing the
                                 * delay slot instruction
                                 */
                                regs->bta += 2;
                        }
                }

                is_branch = 0;
        } else
                is_branch =
                    disasm_next_pc((unsigned long)p->addr, regs,
                        (struct callee_regs *) current->thread.callee_reg,
                        &next_pc, &tgt_if_br);

        p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
        p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
        *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

        flush_icache_range((unsigned long)p->ainsn.t1_addr,
                           (unsigned long)p->ainsn.t1_addr +
                           sizeof(kprobe_opcode_t));

        if (is_branch) {
                p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
                p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
                *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));
        }
}

int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;

        preempt_disable();

        kcb = get_kprobe_ctlblk();
        p = get_kprobe((unsigned long *)addr);

        if (p) {
                /*
                 * We have reentered the kprobe_handler: another kprobe was
                 * hit while within the handler. We save the original kprobe,
                 * single step on the instruction of the new probe, and do not
                 * call any user handlers, to avoid recursive kprobes.
                 */
                if (kprobe_running()) {
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p);
                        kprobes_inc_nmissed_count(p);
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                }

                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                /* If we have no pre-handler or it returned 0, continue with
                 * normal processing. If we have a pre-handler and it returned
                 * non-zero - meaning the user handler set up the registers to
                 * exit to another instruction - we must skip the single
                 * stepping.
                 */
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
                } else {
                        reset_current_kprobe();
                        preempt_enable_no_resched();
                }

                return 1;
        }

        /* no_kprobe: */
        preempt_enable_no_resched();
        return 0;
}

static int __kprobes arc_post_kprobe_handler(unsigned long addr,
                                         struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        resume_execution(cur, addr, regs);

        /* Rearm the kprobe */
        arch_arm_kprobe(cur);

        /*
         * When we return from the trap instruction, we go to the next
         * instruction. We restored the original instruction in
         * resume_execution(), so we need to return to the same address and
         * execute it
         */
        regs->ret = addr;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }

        reset_current_kprobe();

out:
        preempt_enable_no_resched();
        return 1;
}

/*
 * The fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This applies to scenarios such as user probes, where the probe is in
 * user space and the handlers are in the kernel
 */

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single stepped
                 * caused the fault. We reset the current kprobe and let the
                 * exception be handled as if it were a regular exception. In
                 * our case it doesn't matter because the system will be
                 * halted
                 */
                resume_execution(cur, (unsigned long)cur->addr, regs);

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();

                preempt_enable_no_resched();
                break;

        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We are here because the instructions in the pre/post handler
                 * caused the fault.
                 */

                /* We increment the nmissed count for accounting; the
                 * npre/npostfault counts could also be used to account for
                 * these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault; this could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned zero,
                 * try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;

                /*
                 * fixup_exception() could not handle it,
                 * so let do_page_fault() fix it.
                 */
                break;

        default:
                break;
        }
        return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = data;
        unsigned long addr = args->err;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_IERR:
                if (arc_kprobe_handler(addr, args->regs))
                        return NOTIFY_STOP;
                break;

        case DIE_TRAP:
                if (arc_post_kprobe_handler(addr, args->regs))
                        return NOTIFY_STOP;
                break;

        default:
                break;
        }

        return ret;
}

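/*
 * The kretprobe trampoline is a single "nop" defined via inline asm; the
 * holder function below exists only so the assembly is emitted into kernel
 * text. Returning into the nop hits the kprobe registered on it
 * (trampoline_p), whose pre-handler recovers the real return address.
 */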
static void __used kretprobe_trampoline_holder(void)
{
        __asm__ __volatile__(".global kretprobe_trampoline\n"
                             "kretprobe_trampoline:\n"
                             "nop\n");
}

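/*
 * On ARC the return address of a function call lives in the blink register
 * (r31), saved in pt_regs as regs->blink, so hijacking the return means
 * saving and rewriting that slot.
 */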
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->blink;

        /* Replace the return addr with trampoline addr */
        regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                              struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have a
         * return probe installed on them, and/or more than one return probe
         * was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->ret = orig_ret_address;

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }

        /* By returning a non-zero value, we tell the kprobe handler that we
         * don't want the post_handler to run
         */
        return 1;
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        /* Register a kprobe on the trampoline code for kretprobes */
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
                return 1;

        return 0;
}

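/*
 * Called from the ARC trap handling code (outside this file) when a TRAP_S
 * is hit, so the event can be offered to kprobes: notify_die(DIE_TRAP)
 * funnels it into kprobe_exceptions_notify() above.
 */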
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
        notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
