arch/arm64/kernel/ftrace.c


DEFINITIONS

This source file includes the following definitions:
  1. ftrace_modify_code
  2. ftrace_update_ftrace_func
  3. ftrace_make_call
  4. ftrace_make_nop
  5. arch_ftrace_update_code
  6. ftrace_dyn_arch_init
  7. prepare_ftrace_return
  8. ftrace_modify_graph_caller
  9. ftrace_enable_ftrace_graph_caller
  10. ftrace_disable_ftrace_graph_caller

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is checked
 * against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
                              bool validate)
{
        u32 replaced;

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug were to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with aarch64_insn_*() which uses
         * probe_kernel_*(), and make sure what we read is what we expected it
         * to be before modifying it.
         */
        if (validate) {
                if (aarch64_insn_read((void *)pc, &replaced))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }
        if (aarch64_insn_patch_text_nosync((void *)pc, new))
                return -EPERM;

        return 0;
}
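
/*
 * A minimal usage sketch, assuming the patch site currently holds the
 * compiler-emitted NOP: enabling tracing of a function amounts to swapping
 * a single 32-bit instruction. On arm64 a NOP encodes as 0xd503201f, and a
 * BL carries a signed 26-bit word offset, which is what limits direct
 * calls to +/-128 MiB of the patch site:
 *
 *      u32 nop = aarch64_insn_gen_nop();
 *      u32 bl = aarch64_insn_gen_branch_imm(pc, (unsigned long)ftrace_caller,
 *                                           AARCH64_INSN_BRANCH_LINK);
 *      int ret = ftrace_modify_code(pc, nop, bl, true);
 */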

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc;
        u32 new;

        pc = (unsigned long)&ftrace_call;
        new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
                                          AARCH64_INSN_BRANCH_LINK);

        return ftrace_modify_code(pc, 0, new, false);
}
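
/*
 * For orientation (a sketch; the default target name is an assumption):
 * ftrace_call labels a patchable branch slot inside the ftrace_caller()
 * assembly stub, so updating the tracer rewrites just that one
 * instruction, roughly:
 *
 *      ftrace_call:    bl      ftrace_stub     // default, does nothing
 *      ftrace_call:    bl      <func>          // after ftrace_update_ftrace_func(func)
 *
 * Validation is skipped (validate == false) because the old branch target
 * depends on whichever tracer was installed before.
 */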

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long pc = rec->ip;
        u32 old, new;
        long offset = (long)pc - (long)addr;

        if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
                struct plt_entry trampoline, *dst;
                struct module *mod;

                /*
                 * On kernels that support module PLTs, the offset between the
                 * branch instruction and its target may legally exceed the
                 * range of an ordinary relative 'bl' opcode. In this case, we
                 * need to branch via a trampoline in the module.
                 *
                 * NOTE: __module_text_address() must be called with preemption
                 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
                 * retains its validity throughout the remainder of this code.
                 */
                preempt_disable();
                mod = __module_text_address(pc);
                preempt_enable();

                if (WARN_ON(!mod))
                        return -EINVAL;

                /*
                 * There is only one ftrace trampoline per module. For now,
                 * this is not a problem since on arm64, all dynamic ftrace
                 * invocations are routed via ftrace_caller(). This will need
                 * to be revisited if support for multiple ftrace entry points
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
                 *
                 * Note that PLTs are place relative, and plt_entries_equal()
                 * checks whether they point to the same target. Here, we need
                 * to check if the actual opcodes are in fact identical,
                 * regardless of the offset in memory, so we use memcmp() instead.
                 */
                dst = mod->arch.ftrace_trampoline;
                trampoline = get_plt_entry(addr, dst);
                if (memcmp(dst, &trampoline, sizeof(trampoline))) {
                        if (plt_entry_is_initialized(dst)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }

                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
                        *dst = trampoline;
                        module_enable_ro(mod, true);

                        /*
                         * Ensure the updated trampoline is visible to instruction
                         * fetch before we patch in the branch. Although the
                         * architecture doesn't require an IPI in this case,
                         * Neoverse-N1 erratum #1542419 does require one
                         * if the TLB maintenance in module_enable_ro() is
                         * skipped due to rodata_enabled. It doesn't seem worth
                         * it to make it conditional given that this is
                         * certainly not a fast-path.
                         */
                        flush_icache_range((unsigned long)&dst[0],
                                           (unsigned long)&dst[1]);
                }
                addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
        }

        old = aarch64_insn_gen_nop();
        new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

        return ftrace_modify_code(pc, old, new, true);
}
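
/*
 * For reference, a sketch of the module trampoline this code falls back to
 * when ftrace_caller() is out of BL range (the exact instruction sequence
 * is an assumption based on the arm64 module PLT entry layout):
 *
 *      mod->arch.ftrace_trampoline:
 *              adrp    x16, ftrace_caller
 *              add     x16, x16, :lo12:ftrace_caller
 *              br      x16
 *
 * The BL at the patch site then targets this trampoline instead of
 * ftrace_caller() itself, which is why 'addr' is rewritten above.
 */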

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        unsigned long pc = rec->ip;
        bool validate = true;
        u32 old = 0, new;
        long offset = (long)pc - (long)addr;

        if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
                u32 replaced;

                /*
                 * 'mod' is only set at module load time, but if we end up
                 * dealing with an out-of-range condition, we can assume it
                 * is due to a module being loaded far away from the kernel.
                 */
                if (!mod) {
                        preempt_disable();
                        mod = __module_text_address(pc);
                        preempt_enable();

                        if (WARN_ON(!mod))
                                return -EINVAL;
                }

                /*
                 * The instruction we are about to patch may be a branch and
                 * link instruction that was redirected via a PLT entry. In
                 * this case, the normal validation will fail, but we can at
                 * least check that we are dealing with a branch and link
                 * instruction that points into the right module.
                 */
                if (aarch64_insn_read((void *)pc, &replaced))
                        return -EFAULT;

                if (!aarch64_insn_is_bl(replaced) ||
                    !within_module(pc + aarch64_get_branch_offset(replaced),
                                   mod))
                        return -EINVAL;

                validate = false;
#else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
        } else {
                old = aarch64_insn_gen_branch_imm(pc, addr,
                                                  AARCH64_INSN_BRANCH_LINK);
        }

        new = aarch64_insn_gen_nop();

        return ftrace_modify_code(pc, old, new, validate);
}
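
/*
 * A rough picture of the relaxed check above, assuming the patch site was
 * previously redirected through the module's PLT trampoline: the BL does
 * not point at 'addr' (ftrace_caller in the core kernel), so instead of
 * comparing against a freshly generated BL we only verify that
 *
 *      pc:     bl      <somewhere within 'mod'>
 *
 * holds, recovering the target as pc + aarch64_get_branch_offset(replaced),
 * i.e. the sign-extended 26-bit immediate scaled by four.
 */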

void arch_ftrace_update_code(int command)
{
        command |= FTRACE_MAY_SLEEP;
        ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Note:
         * No protection against faulting at *parent, which may be seen
         * on other archs. It's unlikely on AArch64.
         */
        old = *parent;

        if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
                *parent = return_hooker;
}
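
/*
 * In effect, the hook set up above changes the traced function's return
 * path roughly as follows (a sketch of the control flow, not literal code):
 *
 *      without the hook:  caller -> traced_func -> ret to 'old'
 *      with the hook:     caller -> traced_func -> ret to return_to_handler,
 *                         which calls ftrace_return_to_handler() and only
 *                         then branches back to 'old'
 */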

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
        unsigned long pc = (unsigned long)&ftrace_graph_call;
        u32 branch, nop;

        branch = aarch64_insn_gen_branch_imm(pc,
                                             (unsigned long)ftrace_graph_caller,
                                             AARCH64_INSN_BRANCH_NOLINK);
        nop = aarch64_insn_gen_nop();

        if (enable)
                return ftrace_modify_code(pc, nop, branch, true);
        else
                return ftrace_modify_code(pc, branch, nop, true);
}
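
/*
 * ftrace_graph_call is a second patch slot inside the ftrace_caller()
 * stub. The sketch below shows the two states it toggles between; unlike
 * ftrace_update_ftrace_func(), both encodings are known exactly here, so
 * the old instruction can be validated:
 *
 *      ftrace_graph_call:      nop                             // graph tracing off
 *      ftrace_graph_call:      b       ftrace_graph_caller     // graph tracing on
 */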

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
