arch/arm/kernel/entry-header.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH    0
#define BAD_DATA        1
#define BAD_ADDREXCPTN  2
#define BAD_IRQ         3
#define BAD_UNDEFINSTR  4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF           8

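@ For orientation, a sketch of the resulting frame layout (the S_* offsets
@ come from asm-offsets.h; the picture is illustrative, not normative):
@
@       high:   sp + S_OFF + S_OLD_R0   old_r0          \
@               sp + S_OFF + S_PSR      cpsr            |
@               sp + S_OFF + S_PC       pc              |  struct pt_regs
@               ...                     r1 - lr         |
@               sp + S_OFF + S_R0       r0              /
@       low:    sp + 0 .. sp + 7        syscall args 5 and 6 (S_OFF bytes)
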
/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0
#endif
        .endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

        .macro  alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
        mrc     p15, 0, \rtmp2, c1, c0, 0
        ldr     \rtmp1, \label
        ldr     \rtmp1, [\rtmp1]
        teq     \rtmp1, \rtmp2
        mcrne   p15, 0, \rtmp1, c1, c0, 0
#endif
        .endm

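        @ A hypothetical invocation (the label name here is illustrative, not
        @ from this file): \label must name a literal word holding the address
        @ of the kernel's saved SCTLR copy, e.g.
        @
        @       .L_cr_alignment: .word cr_alignment
        @
        @       alignment_trap r10, ip, .L_cr_alignment
        @
        @ The macro re-applies the saved SCTLR whenever the live copy differs,
        @ e.g. after alignment trapping was toggled via /proc/cpu/alignment.
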
#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (8 words, 32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is automatically aligned to 64 bits
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
        .macro  v7m_exception_entry
        @ determine the location of the registers saved by the core during
        @ exception entry. Depending on the mode the cpu was in when the
        @ exception happened, that is either the main or the process stack.
        @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
        @ was used.
        tst     lr, #EXC_RET_STACK_MASK
        mrsne   r12, psp
        moveq   r12, sp

        @ we cannot rely on r0-r3 and r12 matching the values saved in the
        @ exception frame because of tail-chaining. So these have to be
        @ reloaded.
        ldmia   r12!, {r0-r3}

        @ Linux expects to have irqs off. Do it here before taking stack space.
        cpsid   i

        sub     sp, #PT_REGS_SIZE-S_IP
        stmdb   sp!, {r0-r11}

        @ load saved r12, lr, return address and xPSR.
        @ r0-r7 are used for signals and never touched from now on. Clobbering
        @ r8-r12 is OK.
        mov     r9, r12
        ldmia   r9!, {r8, r10-r12}

        @ calculate the original stack pointer value.
        @ r9 currently points to the memory location just above the auto saved
        @ xPSR.
        @ The cpu might automatically 8-byte align the stack. Bit 9 of the
        @ saved xPSR specifies whether stack alignment took place; in that
        @ case an extra 32-bit padding word is included in the frame.

        tst     r12, V7M_xPSR_FRAMEPTRALIGN
        addne   r9, r9, #4

        @ store saved r12 using str to have a register to hold the base for stm
        str     r8, [sp, #S_IP]
        add     r8, sp, #S_SP
        @ store r13-r15, xPSR
        stmia   r8!, {r9-r12}
        @ store old_r0
        str     r0, [r8]
        .endm
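
        @ For reference, the hardware-saved ARMv7-M basic frame that this
        @ macro walks, lowest address first (from the architecture manual,
        @ shown here as an aid; annotations give the registers used above):
        @
        @       +0      r0      \
        @       +4      r1      | reloaded into r0-r3
        @       +8      r2      |
        @       +12     r3      /
        @       +16     r12     -> r8
        @       +20     lr      -> r10
        @       +24     return address -> r11
        @       +28     xPSR    -> r12 (r9 ends up just past this word)
        @       (+32    padding word, present when xPSR bit 9 is set)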

        /*
         * PENDSV and SVCALL are configured to have the same exception
         * priorities. As a kernel thread runs at SVCALL execution priority it
         * can never be preempted and so we will never have to return to a
         * kernel thread here.
         */
        .macro  v7m_exception_slow_exit ret_r0
        cpsid   i
        ldr     lr, =exc_ret
        ldr     lr, [lr]

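        @ (exc_ret is assumed to be a word defined elsewhere in the kernel's
        @ v7m entry code, e.g. entry-v7m.S, holding the EXC_RETURN value
        @ recorded at exception entry; reloading it into lr selects the right
        @ return stack and state for the bx lr below.)
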
        @ read original r12, sp, lr, pc and xPSR
        add     r12, sp, #S_IP
        ldmia   r12, {r1-r5}

        @ an exception frame is always 8-byte aligned. To tell the hardware
        @ whether the sp to be restored was 8-byte aligned, set bit 9 of the
        @ saved xPSR accordingly.
        tst     r2, #4
        subne   r2, r2, #4
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN

        @ ensure bit 0 is cleared in the PC, otherwise behaviour is
        @ unpredictable
        bic     r4, #1

        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
        .if     \ret_r0
        stmdb   r2!, {r0, r3-r5}
        .else
        stmdb   r2!, {r1, r3-r5}
        .endif

        @ restore process sp
        msr     psp, r2

        @ restore original r4-r11
        ldmia   sp!, {r0-r11}

        @ restore main sp
        add     sp, sp, #PT_REGS_SIZE-S_IP

        cpsie   i
        bx      lr
        .endm
#endif  /* CONFIG_CPU_V7M */

        @
        @ Store/load the USER SP and LR registers by switching to the SYS
        @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
        @ available. Should only be called from SVC mode.
        @
        .macro  store_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        str     sp, [\rd, #\offset]             @ save sp_usr
        str     lr, [\rd, #\offset + 4]         @ save lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
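
        @ A worked example of the mode-flip trick above (mode encodings from
        @ the ARM ARM, shown for illustration): SVC_MODE is 0x13 and
        @ SYSTEM_MODE is 0x1f, so SVC_MODE ^ SYSTEM_MODE = 0x0c. EORing the
        @ live cpsr with 0x0c turns mode 0x13 into 0x1f (SVC -> SYS), and
        @ EORing with the same constant again restores 0x13. SYS mode shares
        @ the user sp/lr bank while keeping kernel privilege, which is what
        @ lets the plain str/ldr above reach sp_usr and lr_usr.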

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        ldr     sp, [\rd, #\offset]             @ load sp_usr
        ldr     lr, [\rd, #\offset + 4]         @ load lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm


        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
        @ The parent context IRQs must have been enabled to get here in
        @ the first place, so there's no point checking the PSR I bit.
        bl      trace_hardirqs_on
#endif
        .else
        @ IRQs off again before pulling preserved data off the stack
        disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     \rpsr, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     \rpsr, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        .endif
        uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        sub     r0, sp, #4                      @ uninhabited address
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
#endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#else
        @ Thumb mode SVC restore
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor

        stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
        ldmia   sp, {r0 - r12}
        mov     sp, lr
        ldr     lr, [sp], #4
        rfeia   sp!
#endif
        .endm
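
        @ A note on the dummy strex above (an explanatory aside, not from the
        @ original file): per the comment, erratum #830321 means clrex cannot
        @ be trusted on some Cortex-A15 parts, but any strex clears the local
        @ exclusive monitor as a side effect, whether or not its store goes
        @ through. Both paths therefore aim the store at memory whose
        @ contents no longer matter: the ARM path at the unused word below
        @ sp, the Thumb path at the S_LR slot already consumed by the ldrd.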

        @
        @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
        @
        @ This macro acts in a similar manner to svc_exit but switches to FIQ
        @ mode to restore the final part of the register state.
        @
        @ We cannot use the normal svc_exit procedure because that would
        @ clobber spsr_svc (FIQ could be delivered during the first few
        @ instructions of vector_swi meaning its contents have not been
        @ saved anywhere).
        @
        @ Note that, unlike svc_exit, this macro also does not allow a caller
        @ supplied rpsr. This is because FIQ exceptions are not re-entrant
        @ and the handlers cannot call into the scheduler (meaning the value
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
        uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        add     r8, r0, #S_PC
        ldr     r9, [r0, #S_PSR]
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
#else
        @ Thumb mode restore
        add     r0, sp, #S_R2
        ldr     lr, [sp, #S_LR]
        ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        ldmia   r0, {r2 - r12}
        mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        msr     cpsr_c, r1
        sub     r0, #S_R2
        add     r8, r0, #S_PC
        ldmia   r0, {r0 - r1}
        rfeia   r8
#endif
        .endm
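
        @ Why the FIQ detour works (an explanatory aside): once cpsr_c selects
        @ FIQ mode, r8-r14 become the FIQ-banked copies, giving this code
        @ scratch registers without disturbing the r1-r7, sp and lr values
        @ already loaded in SVC mode. The ARM path then returns through
        @ spsr_fiq and the Thumb path through rfe, so spsr_svc, the one
        @ register this exit must preserve, is never written.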


        .macro  restore_user_regs, fast = 0, offset = 0
        uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r2]                    @ clear the exclusive monitor
#endif
        .if     \fast
        ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
        ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
#else
        @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [sp]                    @ clear the exclusive monitor

        .if     \fast
        ldmdb   sp, {r1 - r12}                  @ get calling r1 - r12
        .else
        ldmdb   sp, {r0 - r12}                  @ get calling r0 - r12
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#endif  /* !CONFIG_THUMB2_KERNEL */
        .endm
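
        @ An aside on the "tst r1, #PSR_I_BIT | 0x0f" checks above: a PSR
        @ destined for user space must have the I bit clear (IRQs enabled)
        @ and the low mode nibble zero (USR_MODE is 0x10, so bits 3:0 are 0).
        @ Anything else means the saved PSR was corrupted or misused, hence
        @ the bug trap at label 1.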

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
        .macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      context_tracking_user_exit
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      context_tracking_user_exit
        .endif
#endif
        .endm

        .macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      context_tracking_user_enter
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      context_tracking_user_enter
        .endif
#endif
        .endm

        .macro  invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
        mov     \tmp, \nr
        cmp     \tmp, #NR_syscalls              @ check upper syscall limit
        movcs   \tmp, #0
        csdb
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
#else
        cmp     \nr, #NR_syscalls               @ check upper syscall limit
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
#endif
        .endm
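
        @ How the CONFIG_CPU_SPECTRE path above bounds the table index
        @ against Spectre-v1 (an explanatory aside): cmp records whether \tmp
        @ is below NR_syscalls, movcs forces any out-of-range number to 0,
        @ and csdb is a speculation barrier that keeps later instructions
        @ from executing with a \tmp value the movcs would have rewritten.
        @ The ldrcc into pc is thus bounded both architecturally and under
        @ speculation; the non-Spectre path only has the architectural check.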

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info
