Lines matching refs:kp (references to kp in arch/x86/kernel/kprobes/opt.c)
45 struct kprobe *kp; in __recover_optprobed_insn() local
50 kp = get_kprobe((void *)addr - i); in __recover_optprobed_insn()
52 if (kp && kprobe_optimized(kp)) { in __recover_optprobed_insn()
53 op = container_of(kp, struct optimized_kprobe, kp); in __recover_optprobed_insn()
68 if (addr == (unsigned long)kp->addr) { in __recover_optprobed_insn()
69 buf[0] = kp->opcode; in __recover_optprobed_insn()
72 offs = addr - (unsigned long)kp->addr - 1; in __recover_optprobed_insn()
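The matches above all sit in __recover_optprobed_insn(), the routine that rebuilds the original instruction bytes at an address covered by a jump-optimized kprobe. A condensed reconstruction around the matched lines follows; identifiers not shown in the matches (MAX_INSN_SIZE, RELATIVE_ADDR_SIZE, op->optinsn.copied_insn, list_empty) are recalled from the kernel source and may differ between versions:

    unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
    {
        struct optimized_kprobe *op;
        struct kprobe *kp;
        long offs;
        int i;

        /* Walk back far enough to catch a jump that overlaps addr. */
        for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
            kp = get_kprobe((void *)addr - i);
            /* Only jump-optimized probes overwrite more than one byte. */
            if (kp && kprobe_optimized(kp)) {
                op = container_of(kp, struct optimized_kprobe, kp);
                if (list_empty(&op->list))   /* not being (un)optimized right now */
                    goto found;
            }
        }
        return addr;    /* nothing optimized here, the bytes are usable as-is */

    found:
        /* Rebuild the original bytes that the relative jump overwrote. */
        memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        if (addr == (unsigned long)kp->addr) {
            buf[0] = kp->opcode;    /* first byte was saved in the kprobe itself */
            memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
        } else {
            offs = addr - (unsigned long)kp->addr - 1;
            memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
        }
        return (unsigned long)buf;
    }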
148 if (kprobe_disabled(&op->kp)) in optimized_callback()
153 kprobes_inc_nmissed_count(&op->kp); in optimized_callback()
162 regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; in optimized_callback()
165 __this_cpu_write(current_kprobe, &op->kp); in optimized_callback()
167 opt_pre_handler(&op->kp, regs); in optimized_callback()
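optimized_callback() is the handler the out-of-line trampoline calls for an optimized probe. The matched lines show every use of kp there: skip a disabled probe, count a miss if another kprobe is already running, otherwise fake the register state an int3 hit would have produced and run the pre-handlers with op->kp as the current kprobe. A sketch built around those lines; the locking is recalled from the kernel source and the 32-bit segment fixups are omitted:

    static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
    {
        unsigned long flags;

        /* Probe may be in the middle of delayed unoptimization. */
        if (kprobe_disabled(&op->kp))
            return;

        local_irq_save(flags);
        if (kprobe_running()) {
            /* Another kprobe is already active on this CPU; record a miss. */
            kprobes_inc_nmissed_count(&op->kp);
        } else {
            /* Make regs look like an int3 trap at the probe point. */
            regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
            regs->orig_ax = ~0UL;

            __this_cpu_write(current_kprobe, &op->kp);
            regs->flags &= ~X86_EFLAGS_IF;
            opt_pre_handler(&op->kp, regs);
            __this_cpu_write(current_kprobe, NULL);
        }
        local_irq_restore(flags);
    }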
290 p = get_kprobe(op->kp.addr + i); in arch_check_optimized_kprobe()
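Line 290 belongs to arch_check_optimized_kprobe(), which refuses to optimize when another enabled kprobe sits on any byte the relative jump would swallow. A likely shape for the loop around the matched line; the loop bounds and -EEXIST return are recalled, not shown in the match:

    int arch_check_optimized_kprobe(struct optimized_kprobe *op)
    {
        struct kprobe *p;
        int i;

        /* The jump covers op->optinsn.size bytes starting at kp.addr. */
        for (i = 1; i < op->optinsn.size; i++) {
            p = get_kprobe(op->kp.addr + i);
            if (p && !kprobe_disabled(p))
                return -EEXIST;    /* another probe lives inside the jump */
        }
        return 0;
    }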
302 return ((unsigned long)op->kp.addr <= addr && in arch_within_optimized_kprobe()
303 (unsigned long)op->kp.addr + op->optinsn.size > addr); in arch_within_optimized_kprobe()
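Lines 302-303 are essentially the whole body of arch_within_optimized_kprobe(): an address is "inside" an optimized probe if it falls anywhere in the [kp.addr, kp.addr + optinsn.size) range replaced by the jump. With the surrounding signature filled in as a sketch:

    int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
    {
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + op->optinsn.size > addr);
    }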
334 if (!can_optimize((unsigned long)op->kp.addr)) in arch_prepare_optimized_kprobe()
345 rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; in arch_prepare_optimized_kprobe()
354 ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); in arch_prepare_optimized_kprobe()
372 (u8 *)op->kp.addr + op->optinsn.size); in arch_prepare_optimized_kprobe()
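The arch_prepare_optimized_kprobe() matches trace the prepare path: bail out if kp.addr cannot be optimized, check the detour buffer is within 32-bit relative-jump range of the probe, copy the probed instructions out of line, and end the buffer with a jump back to kp.addr + optinsn.size. A shortened reconstruction around the matched lines; slot allocation, the template copy and the synthesize_* helpers are from memory of older kernels, and error-path cleanup is omitted:

    int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
    {
        u8 *buf;
        long rel;
        int ret;

        if (!can_optimize((unsigned long)op->kp.addr))
            return -EILSEQ;

        op->optinsn.insn = get_optinsn_slot();
        if (!op->optinsn.insn)
            return -ENOMEM;

        /* The detour buffer must be reachable by a 32-bit relative jump. */
        rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
        if (abs(rel) > 0x7fffffff)
            return -ERANGE;

        buf = (u8 *)op->optinsn.insn;

        /* Copy whole instructions (never split one) out of line. */
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
        if (ret < 0)
            return ret;
        op->optinsn.size = ret;

        /* Template head: save regs, load op, call optimized_callback(). */
        memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
        synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

        /* Jump back to the first instruction after the copied region. */
        synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
                           (u8 *)op->kp.addr + op->optinsn.size);
        return 0;
    }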
391 ((long)op->kp.addr + RELATIVEJUMP_SIZE)); in arch_optimize_kprobes()
393 WARN_ON(kprobe_disabled(&op->kp)); in arch_optimize_kprobes()
396 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, in arch_optimize_kprobes()
402 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, in arch_optimize_kprobes()
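arch_optimize_kprobes() is where the int3 probe actually becomes a jump: it computes the relative displacement from kp.addr to the detour buffer, backs up the bytes after the int3 into copied_insn (the backup that __recover_optprobed_insn() and arch_unoptimize_kprobe() rely on), and patches the 5-byte jmp in with text_poke_bp(). Reconstructed around the matched lines; the list handling is recalled from the kernel source:

    void arch_optimize_kprobes(struct list_head *oplist)
    {
        struct optimized_kprobe *op, *tmp;
        u8 insn_buf[RELATIVEJUMP_SIZE];

        list_for_each_entry_safe(op, tmp, oplist, list) {
            s32 rel = (s32)((long)op->optinsn.insn -
                            ((long)op->kp.addr + RELATIVEJUMP_SIZE));

            WARN_ON(kprobe_disabled(&op->kp));

            /* Save the bytes the 4-byte displacement will overwrite. */
            memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
                   RELATIVE_ADDR_SIZE);

            insn_buf[0] = RELATIVEJUMP_OPCODE;    /* 0xe9: jmp rel32 */
            *(s32 *)(&insn_buf[1]) = rel;

            /* Patch live text; detour through op->optinsn.insn while patching. */
            text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
                         op->optinsn.insn);

            list_del_init(&op->list);
        }
    }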
417 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, in arch_unoptimize_kprobe()
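arch_unoptimize_kprobe() is the inverse: put an int3 back in the first byte, restore the saved tail bytes from copied_insn, and write the result over kp.addr with text_poke_bp(). A short sketch; the buffer layout follows from the matched line and the backup made in arch_optimize_kprobes():

    void arch_unoptimize_kprobe(struct optimized_kprobe *op)
    {
        u8 insn_buf[RELATIVEJUMP_SIZE];

        insn_buf[0] = BREAKPOINT_INSTRUCTION;    /* 0xcc: back to int3 */
        memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
        text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
                     op->optinsn.insn);
    }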
442 op = container_of(p, struct optimized_kprobe, kp); in setup_detour_execution()
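Finally, setup_detour_execution() (line 442) covers the window where a probe is flagged optimized but still hits the int3 path: the breakpoint handler uses container_of() to recover the optimized_kprobe from the plain kprobe and redirects regs->ip into the out-of-line buffer instead of single-stepping. A sketch around the matched line; the flag check and re-enter handling are from memory and vary by kernel version:

    int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
    {
        struct optimized_kprobe *op;

        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
            /* Recover the container and detour into the copied instructions. */
            op = container_of(p, struct optimized_kprobe, kp);
            regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
            if (!reenter)
                reset_current_kprobe();
            preempt_enable_no_resched();
            return 1;
        }
        return 0;
    }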