root/arch/arm64/include/asm/ftrace.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. ftrace_call_adjust
  2. arch_trace_is_compat_syscall
  3. arch_syscall_match_sym_name

   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 /*
   3  * arch/arm64/include/asm/ftrace.h
   4  *
   5  * Copyright (C) 2013 Linaro Limited
   6  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
   7  */
   8 #ifndef __ASM_FTRACE_H
   9 #define __ASM_FTRACE_H
  10 
  11 #include <asm/insn.h>
  12 
  13 #define HAVE_FUNCTION_GRAPH_FP_TEST
  14 #define MCOUNT_ADDR             ((unsigned long)_mcount)
  15 #define MCOUNT_INSN_SIZE        AARCH64_INSN_SIZE
  16 
  17 /*
  18  * Currently, gcc tends to save the link register after the local variables
  19  * on the stack. This causes the max stack tracer to report the function
  20  * frame sizes for the wrong functions. By defining
  21  * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
  22  * to find the return address on the stack after the local variables have
  23  * been set up.
  24  *
  25  * Note, this may change in the future, and we will need to deal with that
  26  * if it were to happen.
  27  */
  28 #define ARCH_FTRACE_SHIFT_STACK_TRACER 1
  29 
  30 #ifndef __ASSEMBLY__
  31 #include <linux/compat.h>
  32 
  33 extern void _mcount(unsigned long);
  34 extern void *return_address(unsigned int);
  35 
   36 struct dyn_arch_ftrace {
   37         /* No extra data needed for arm64; intentionally empty */
   38 };
  39 
  40 extern unsigned long ftrace_graph_call;
  41 
  42 extern void return_to_handler(void);
  43 
  44 static inline unsigned long ftrace_call_adjust(unsigned long addr)
  45 {
  46         /*
  47          * addr is the address of the mcount call instruction.
  48          * recordmcount does the necessary offset calculation.
  49          */
  50         return addr;
  51 }
  52 
  53 #define ftrace_return_address(n) return_address(n)
  54 
  55 /*
  56  * Because AArch32 mode does not share the same syscall table with AArch64,
  57  * tracing compat syscalls may result in reporting bogus syscalls or even
  58  * hang-up, so just do not trace them.
  59  * See kernel/trace/trace_syscalls.c
  60  *
  61  * x86 code says:
  62  * If the user really wants these, then they should use the
  63  * raw syscall tracepoints with filtering.
  64  */
  65 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
   66 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
   67 {
   68         /* regs is unused: compat-ness is a property of the current task */
   69         return is_compat_task();
   70 }
  70 
  71 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
  72 
  73 static inline bool arch_syscall_match_sym_name(const char *sym,
  74                                                const char *name)
  75 {
  76         /*
  77          * Since all syscall functions have __arm64_ prefix, we must skip it.
  78          * However, as we described above, we decided to ignore compat
  79          * syscalls, so we don't care about __arm64_compat_ prefix here.
  80          */
  81         return !strcmp(sym + 8, name);
  82 }
  83 #endif /* ifndef __ASSEMBLY__ */
  84 
  85 #endif /* __ASM_FTRACE_H */

/* [<][>][^][v][top][bottom][index][help] */