/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

/*
 * GCC with -pg will put the following code at the beginning of each function:
 *      mov x0, x30
 *      bl _mcount
 *      [function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get the lr (x30) of the instrumented function at any time by unwinding the
 * call stack, as long as the kernel is compiled without -fomit-frame-pointer
 * (or with CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */
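
/*
 * For illustration, a minimal, hedged sketch (foo is a made-up function
 * name) of what a -pg-instrumented function looks like and how it leads to
 * the stack layout shown above:
 *
 *      foo:
 *              stp     x29, x30, [sp, #-16]!   // save parent's fp/lr
 *              mov     x29, sp                 // foo's frame pointer
 *              mov     x0, x30                 // x0 = foo's return address
 *              bl      _mcount                 // enter _mcount() (layout above)
 *              [foo's body ...]
 */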

        /* build a frame record for _mcount()/ftrace_caller() itself */
        .macro mcount_enter
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp
        .endm

        /* pop that frame record and return to the instrumented function */
        .macro mcount_exit
        ldp     x29, x30, [sp], #16
        ret
        .endm

        /* \rd = \rn - 4: step back from a return address to the mcount call site */
        .macro mcount_adjust_addr rd, rn
        sub     \rd, \rn, #AARCH64_INSN_SIZE
        .endm

        /* for instrumented function's parent */
        .macro mcount_get_parent_fp reg
        ldr     \reg, [x29]             // instrumented function's fp
        ldr     \reg, [\reg]            // parent's fp
        .endm

        /* for instrumented function */
        .macro mcount_get_pc0 reg
        mcount_adjust_addr      \reg, x30       // x30 = return address into the
                                                // instrumented function
        .endm

        .macro mcount_get_pc reg
        ldr     \reg, [x29, #8]         // saved x30 = return address into the
                                        // instrumented function
        mcount_adjust_addr      \reg, \reg
        .endm

        .macro mcount_get_lr reg
        ldr     \reg, [x29]             // instrumented function's fp
        ldr     \reg, [\reg, #8]        // its saved lr (= parent's pc)
        .endm

        .macro mcount_get_lr_addr reg
        ldr     \reg, [x29]             // instrumented function's fp
        add     \reg, \reg, #8          // address of its saved lr
        .endm
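
/*
 * Putting the helpers together (illustrative only, assuming the stack
 * layout documented at the top of this file):
 *
 *      mcount_get_pc           x0      // x0 = [x29 + 8] - 4 (the mcount call site)
 *      mcount_get_lr           x1      // x1 = [[x29] + 8]   (parent's pc)
 *      mcount_get_parent_fp    x2      // x2 = [[x29]]       (parent's fp)
 */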
  85 
  86 #ifndef CONFIG_DYNAMIC_FTRACE
  87 /*
  88  * void _mcount(unsigned long return_address)
  89  * @return_address: return address to instrumented function
  90  *
  91  * This function makes calls, if enabled, to:
  92  *     - tracer function to probe instrumented function's entry,
  93  *     - ftrace_graph_caller to set up an exit hook
  94  */
  95 ENTRY(_mcount)
  96         mcount_enter
  97 
  98         ldr_l   x2, ftrace_trace_function
  99         adr     x0, ftrace_stub
 100         cmp     x0, x2                  // if (ftrace_trace_function
 101         b.eq    skip_ftrace_call        //     != ftrace_stub) {
 102 
 103         mcount_get_pc   x0              //       function's pc
 104         mcount_get_lr   x1              //       function's lr (= parent's pc)
 105         blr     x2                      //   (*ftrace_trace_function)(pc, lr);
 106 
 107 skip_ftrace_call:                       // }
 108 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 109         ldr_l   x2, ftrace_graph_return
 110         cmp     x0, x2                  //   if ((ftrace_graph_return
 111         b.ne    ftrace_graph_caller     //        != ftrace_stub)
 112 
 113         ldr_l   x2, ftrace_graph_entry  //     || (ftrace_graph_entry
 114         adr_l   x0, ftrace_graph_entry_stub //     != ftrace_graph_entry_stub))
 115         cmp     x0, x2
 116         b.ne    ftrace_graph_caller     //     ftrace_graph_caller();
 117 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 118         mcount_exit
 119 ENDPROC(_mcount)
 120 EXPORT_SYMBOL(_mcount)
 121 NOKPROBE(_mcount)
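
/*
 * Hedged C-level sketch of the logic above, mirroring the pseudocode in the
 * inline comments (illustration only, not the actual implementation):
 *
 *      void _mcount(unsigned long return_address)
 *      {
 *              unsigned long pc = <address of the mcount call site>;
 *              unsigned long lr = <instrumented function's lr, i.e. parent's pc>;
 *
 *              if (ftrace_trace_function != ftrace_stub)
 *                      (*ftrace_trace_function)(pc, lr);
 *
 *      #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 *              if (ftrace_graph_return != ftrace_stub ||
 *                  ftrace_graph_entry != ftrace_graph_entry_stub)
 *                      ftrace_graph_caller();
 *      #endif
 *      }
 */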

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs at kernel start
 * up. Later on, each NOP is patched into a branch to ftrace_caller() when
 * tracing is enabled, or back into a NOP when it is disabled, on a
 * per-function basis.
 */
ENTRY(_mcount)
        ret
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
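
/*
 * Hedged sketch of the per-callsite patching lifecycle described above
 * (the actual patching code lives outside this file, in the arm64 ftrace
 * support in arch/arm64/kernel/ftrace.c):
 *
 *      as compiled (-pg):      bl      _mcount
 *      at boot:                nop
 *      tracing enabled:        bl      ftrace_caller
 *      tracing disabled:       nop
 */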

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
ENTRY(ftrace_caller)
        mcount_enter

        mcount_get_pc0  x0              //     function's pc
        mcount_get_lr   x1              //     function's lr

GLOBAL(ftrace_call)                     // tracer(pc, lr);
        nop                             // This will be replaced with "bl xxx"
                                        // where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)               // ftrace_graph_caller();
        nop                             // If enabled, this will be replaced
                                        // with "b ftrace_graph_caller".
#endif

        mcount_exit
ENDPROC(ftrace_caller)
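
/*
 * Purely illustrative, hedged sketch of ftrace_caller once both patch sites
 * above have been enabled at runtime:
 *
 *      ftrace_caller:
 *              mcount_enter
 *              mcount_get_pc0  x0
 *              mcount_get_lr   x1
 *      ftrace_call:
 *              bl      <tracer>                // was nop
 *      ftrace_graph_call:
 *              b       ftrace_graph_caller     // was nop; a plain branch, so the
 *              mcount_exit                     // mcount_exit here is skipped and
 *                                              // ftrace_graph_caller does it instead
 */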
#endif /* CONFIG_DYNAMIC_FTRACE */

ENTRY(ftrace_stub)
        ret
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * Together with prepare_ftrace_return(), this function fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later, on its exit.
 */
ENTRY(ftrace_graph_caller)
        mcount_get_pc             x0    //     function's pc
        mcount_get_lr_addr        x1    //     pointer to function's saved lr
        mcount_get_parent_fp      x2    //     parent's fp
        bl      prepare_ftrace_return   // prepare_ftrace_return(pc, &lr, fp)

        mcount_exit
ENDPROC(ftrace_graph_caller)
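
/*
 * Hedged sketch of what the C side does with those arguments (the real
 * implementation lives outside this file; the parameter order below simply
 * mirrors the (pc, &lr, fp) call above):
 *
 *      void prepare_ftrace_return(unsigned long pc, unsigned long *lr,
 *                                 unsigned long fp)
 *      {
 *              // record the real return address (*lr) for this frame and,
 *              // if the graph tracer accepts the entry, redirect the
 *              // instrumented function's return into return_to_handler:
 *              *lr = (unsigned long)&return_to_handler;
 *      }
 */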

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
ENTRY(return_to_handler)
        /* save return value regs */
        sub sp, sp, #64
        stp x0, x1, [sp]
        stp x2, x3, [sp, #16]
        stp x4, x5, [sp, #32]
        stp x6, x7, [sp, #48]

        mov     x0, x29                 //     parent's fp
        bl      ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
        mov     x30, x0                 // restore the original return address

        /* restore return value regs */
        ldp x0, x1, [sp]
        ldp x2, x3, [sp, #16]
        ldp x4, x5, [sp, #32]
        ldp x6, x7, [sp, #48]
        add sp, sp, #64

        ret
END(return_to_handler)
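
/*
 * Hedged sketch of the resulting round trip once ftrace_graph_caller() has
 * hijacked a return address (foo is a made-up instrumented function):
 *
 *   caller --> foo() --> [foo's body] --> return_to_handler
 *                                           --> ftrace_return_to_handler(fp)
 *                                                 (returns foo's original lr)
 *                                           --> ret back into caller
 */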
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
