root/arch/parisc/kernel/ftrace.c

DEFINITIONS

This source file includes the following definitions:
  1. prepare_ftrace_return
  2. ftrace_function_trampoline
  3. ftrace_enable_ftrace_graph_caller
  4. ftrace_disable_ftrace_graph_caller
  5. ftrace_dyn_arch_init
  6. ftrace_update_ftrace_func
  7. ftrace_modify_call
  8. ftrace_call_adjust
  9. ftrace_make_call
  10. ftrace_make_nop
  11. kprobe_ftrace_handler
  12. arch_prepare_kprobe_ftrace

// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

#define __hot __attribute__ ((__section__ (".text.hot")))

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

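	/*
	 * function_graph_enter() returns 0 once the return address has been
	 * pushed onto the return-address stack of the current task; only
	 * then is it safe to redirect %rp to the return handler.
	 */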
	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct pt_regs *regs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
	extern ftrace_func_t ftrace_trace_function;
#endif
	extern struct ftrace_ops *function_trace_op;

	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
	    ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(self_addr, parent,
				function_trace_op, regs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
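/*
 * ftrace_function_trampoline() above checks ftrace_graph_return and
 * ftrace_graph_entry on every call, so there is no dedicated graph caller
 * to patch in or out; both hooks below simply report success.
 */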
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
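
/*
 * The C trampoline above dereferences ftrace_trace_function on every call,
 * so there is no architecture-level call site to re-patch when the tracer
 * function changes.
 */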
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	return 0;
}

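/*
 * The compiler emits FTRACE_PATCHABLE_FUNCTION_SIZE NOPs per patchable
 * function entry (4 bytes each) and records the address of the first one.
 * Adjust rec->ip to the last NOP, the instruction that ftrace_make_call()
 * turns into the branch to the trampoline.
 */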
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}

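/*
 * Enable tracing for one function: overwrite the patchable NOP area with a
 * small trampoline that saves %r1, loads the target address from a literal
 * embedded in the patch area and branches there.  The final instruction is
 * written at rec->ip and branches backwards into the trampoline, so the
 * function entry itself stays a single instruction.
 */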
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

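	/*
	 * Variant for entries whose patch area is not 8-byte aligned
	 * (rec->ip & 0x4): the 64-bit address literal is placed first so
	 * that the ldd accessing it stays doubleword aligned.
	 */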
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

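	/*
	 * rec->ip points at the last patchable slot, so the trampoline
	 * occupies the "size" bytes ending right after it.  Refuse to patch
	 * anything that is not still the original run of NOPs.
	 */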
	ip = (void *)(rec->ip + 4 - size);

	ret = probe_kernel_read(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

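	/*
	 * NOP out the live branch at rec->ip first so the trampoline body
	 * can no longer be reached, then overwrite the remaining words in
	 * front of it.
	 */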
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
#endif

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);

	if (unlikely(!p) || kprobe_disabled(p))
		return;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		return;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

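	/*
	 * Point the instruction address queue at the probed address so the
	 * pre_handler sees the probe hit there, then advance past it to
	 * mimic completion of the single-stepped instruction.
	 */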
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif
