arch/arm64/include/asm/processor.h


DEFINITIONS

This source file includes the following definitions:
  1. arch_thread_struct_whitelist
  2. start_thread_common
  3. set_ssbs_bit
  4. set_compat_ssbs_bit
  5. start_thread
  6. compat_start_thread
  7. cpu_relax
  8. prefetch
  9. prefetchw
  10. spin_lock_prefetch

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define KERNEL_DS               UL(-1)
#define USER_DS                 ((UL(1) << MAX_USER_VA_BITS) - 1)

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN    0
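
/*
 * Illustrative sketch (driver-side usage, not part of this header): network
 * drivers typically apply NET_IP_ALIGN when reserving skb headroom, e.g.
 *
 *      skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *      skb_reserve(skb, NET_IP_ALIGN);
 *
 * With NET_IP_ALIGN == 0 the reserve is a no-op, so buffers keep their
 * DMA-friendly alignment on arm64.
 */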

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64   (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64            (UL(1) << vabits_actual)

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32            UL(0x100000000)
#else
#define TASK_SIZE_32            (UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES && CONFIG_KUSER_HELPERS */
#define TASK_SIZE               (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)       (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW      (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE               TASK_SIZE_64
#define DEFAULT_MAP_WINDOW      DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
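
/*
 * Illustrative sketch (assumed core behaviour, not defined here): user
 * address validation resolves against the per-task limit chosen above,
 * roughly
 *
 *      if (size > TASK_SIZE || addr > TASK_SIZE - size)
 *              return -EFAULT;
 *
 * so a TIF_32BIT task is capped at TASK_SIZE_32 while a native task gets
 * the full TASK_SIZE_64 window.
 */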

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX           TASK_SIZE_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX           DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE    0xffff0000
#define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP               STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
                                DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
                                        base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
                                        base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
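
/*
 * Illustrative sketch (userspace side, assumed for context): with 52-bit VA
 * support, a process opts in to addresses above DEFAULT_MAP_WINDOW by
 * passing an explicit mmap() hint, e.g.
 *
 *      void *hint = (void *)(1UL << 50);
 *      p = mmap(hint, size, PROT_READ | PROT_WRITE,
 *               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * Without such a hint, arch_get_mmap_end() keeps allocations below the
 * default window.
 */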

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT  (arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Have we suspended stepping by a debugger? */
        int                     suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int                     bps_disabled;
        int                     wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event       *hbp_break[ARM_MAX_BRP];
        struct perf_event       *hbp_watch[ARM_MAX_WRP];
#endif
};

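/*
 * Callee-saved register context preserved across a context switch: per the
 * AAPCS64 calling convention only x19-x28, the frame pointer, stack pointer
 * and return pc need to be saved and restored by cpu_switch_to().
 */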
struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */

        /*
         * Whitelisted fields for hardened usercopy:
         * Maintainers must ensure manually that this contains no
         * implicit padding.
         */
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
                struct user_fpsimd_state fpsimd_state;
        } uw;

        unsigned int            fpsimd_cpu;
        void                    *sve_state;     /* SVE registers, if any */
        unsigned int            sve_vl;         /* SVE vector length */
        unsigned int            sve_vl_onexec;  /* SVE vl after next exec */
        unsigned long           fault_address;  /* fault info */
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
        struct ptrauth_keys     keys_user;
#endif
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        /* Verify that there is no padding among the whitelisted fields: */
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));

        *offset = offsetof(struct thread_struct, uw);
        *size = sizeof_field(struct thread_struct, uw);
}
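
/*
 * Illustrative sketch (core kernel behaviour, assumed for context):
 * fork_init() consumes this hook when creating the task_struct slab cache,
 * roughly
 *
 *      arch_thread_struct_whitelist(&useroffset, &usersize);
 *      useroffset += offsetof(struct task_struct, thread);
 *      kmem_cache_create_usercopy("task_struct", arch_task_struct_size,
 *                                 align, SLAB_PANIC, useroffset, usersize,
 *                                 NULL);
 *
 * so hardened usercopy only permits user copies that fall inside uw.
 */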

#ifdef CONFIG_COMPAT
#define task_user_tls(t)                                                \
({                                                                      \
        unsigned long *__tls;                                           \
        if (is_compat_thread(task_thread_info(t)))                      \
                __tls = &(t)->thread.uw.tp2_value;                      \
        else                                                            \
                __tls = &(t)->thread.uw.tp_value;                       \
        __tls;                                                          \
 })
#else
#define task_user_tls(t)        (&(t)->thread.uw.tp_value)
#endif
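
/*
 * Illustrative sketch: callers treat the result as the task's user TLS
 * slot, e.g.
 *
 *      *task_user_tls(task) = new_tls;         // hypothetical caller
 *
 * which picks uw.tp2_value for compat tasks and uw.tp_value otherwise.
 */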

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {                           \
        .fpsimd_cpu = NR_CPUS,                  \
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
        memset(regs, 0, sizeof(*regs));
        forget_syscall(regs);
        regs->pc = pc;

        if (system_uses_irq_prio_masking())
                regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void set_ssbs_bit(struct pt_regs *regs)
{
        regs->pstate |= PSR_SSBS_BIT;
}

static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
        regs->pstate |= PSR_AA32_SSBS_BIT;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;

        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
                set_ssbs_bit(regs);

        regs->sp = sp;
}
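
/*
 * Illustrative sketch (binfmt behaviour, assumed for context): the ELF
 * loader hands control to a fresh executable with
 *
 *      start_thread(current_pt_regs(), elf_entry, bprm->p);
 *
 * leaving the GPRs zeroed by start_thread_common() and SSBS set unless
 * Speculative Store Bypass is force-mitigated.
 */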

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_AA32_MODE_USR;
        if (pc & 1)
                regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
        regs->pstate |= PSR_AA32_E_BIT;
#endif

        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
                set_compat_ssbs_bit(regs);

        regs->compat_sp = sp;
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

static inline void cpu_relax(void)
{
        asm volatile("yield" ::: "memory");
}
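
/*
 * Illustrative sketch: cpu_relax() belongs in busy-wait loops, e.g.
 *
 *      while (!READ_ONCE(flag))
 *              cpu_relax();
 *
 * The YIELD hint lets an SMT sibling or a hypervisor make progress, and
 * the "memory" clobber forces flag to be reloaded on every iteration.
 */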

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
        asm volatile(ARM64_LSE_ATOMIC_INSN(
                     "prfm pstl1strm, %a0",
                     "nop") : : "p" (ptr));
}
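
/*
 * Illustrative sketch: list walkers can hide memory latency by prefetching
 * the next node before working on the current one, e.g.
 *
 *      for (p = head; p; p = next) {
 *              next = p->next;
 *              prefetch(next);
 *              process(p);             // hypothetical work function
 *      }
 *
 * prefetch() hints a read (PLDL1KEEP) while prefetchw() hints an intent
 * to write (PSTL1KEEP).
 */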

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL()    sve_get_current_vl()

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)        ptrauth_prctl_reset_keys(tsk, arg)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(unsigned long arg);
long get_tagged_addr_ctrl(void);
#define SET_TAGGED_ADDR_CTRL(arg)       set_tagged_addr_ctrl(arg)
#define GET_TAGGED_ADDR_CTRL()          get_tagged_addr_ctrl()
#endif
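
/*
 * Illustrative sketch (userspace side): a process opts in to the tagged
 * address ABI before passing tagged pointers to syscalls, e.g.
 *
 *      prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * after which the kernel untags user pointers in most syscall paths.
 */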

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()                                                  \
({                                                                              \
        struct stack_info _info;                                                \
        BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));   \
        _info.high;                                                             \
})
#define on_thread_stack()       (on_task_stack(current, current_stack_pointer, NULL))
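
/*
 * Illustrative sketch (STACKLEAK core, assumed for context): the erasing
 * pass uses these helpers to bound the region it poisons, roughly
 *
 *      if (on_thread_stack())
 *              erase_kstack_up_to(current_top_of_stack());     // hypothetical helper
 *
 * so only the current task's stack pages are scrubbed.
 */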

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
