root/arch/arm64/include/asm/module.h

DEFINITIONS

This source file includes the following definitions:
  1. is_forbidden_offset_for_adrp
  2. plt_entry_is_initialized

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#define MODULE_ARCH_VERMAGIC    "aarch64"

#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
        int                     plt_shndx;
        int                     plt_num_entries;
        int                     plt_max_entries;
};

struct mod_arch_specific {
        struct mod_plt_sec      core;
        struct mod_plt_sec      init;

        /* for CONFIG_DYNAMIC_FTRACE */
        struct plt_entry        *ftrace_trampoline;
};
#endif

u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
                          void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym);

u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                                void *loc, u64 val);

#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base       ((u64)_etext - MODULES_VSIZE)
#endif

struct plt_entry {
        /*
         * A program that conforms to the AArch64 Procedure Call Standard
         * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
         * IP1 (x17) may be inserted at any branch instruction that is
         * exposed to a relocation that supports long branches. Since that
         * is exactly what we are dealing with here, we are free to use x16
         * as a scratch register in the PLT veneers.
         */
        __le32  adrp;   /* adrp x16, ....                       */
        __le32  add;    /* add  x16, x16, #0x....               */
        __le32  br;     /* br   x16                             */
};

static inline bool is_forbidden_offset_for_adrp(void *place)
{
        return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
               cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
               ((u64)place & 0xfff) >= 0xff8;
}

struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);

static inline bool plt_entry_is_initialized(const struct plt_entry *e)
{
        return e->adrp || e->add || e->br;
}

#endif /* __ASM_MODULE_H */
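
USAGE SKETCH

The declarations above are consumed by the arm64 module loader when it applies ELF relocations. The fragment below is not part of this header; the function names fixup_branch and fixup_adrp are invented for illustration. It is a minimal sketch, assuming the SZ_128M constant from <linux/sizes.h>, of how a relocation handler might fall back to these helpers when a branch target is out of direct-branch range, or when erratum 843419 forbids an ADRP at its current offset.

#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/module.h>

/* Hypothetical helpers, named here only for illustration. */
static u64 fixup_branch(struct module *mod, Elf64_Shdr *sechdrs,
                        void *loc, const Elf64_Rela *rela,
                        Elf64_Sym *sym, u64 val)
{
        s64 off = (s64)(val - (u64)loc);

        /* BL/B immediates reach +/-128 MiB; beyond that, branch via a PLT veneer. */
        if (off < -(s64)SZ_128M || off >= (s64)SZ_128M)
                val = module_emit_plt_entry(mod, sechdrs, loc, rela, sym);

        return val;
}

static u64 fixup_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                      void *loc, u64 val)
{
        /*
         * On parts affected by erratum 843419, an ADRP in the last two
         * instruction slots of a 4 KiB page (offset 0xff8 or 0xffc) is
         * routed through a veneer built from the plt_entry template.
         */
        if (is_forbidden_offset_for_adrp(loc))
                val = module_emit_veneer_for_adrp(mod, sechdrs, loc, val);

        return val;
}

Both helpers return the (possibly redirected) target address that the caller would then encode into the branch or ADRP instruction at loc.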
