arch/arm64/mm/proc.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS    TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS    TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS TCR_NFD1
#else
#define TCR_KASLR_FLAGS 0
#endif
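/*
 * TCR_NFD1 makes non-fault unprivileged accesses (e.g. SVE first-fault
 * loads) to TTBR1 (kernel) addresses fail without performing a
 * translation table walk, so they cannot be used to probe the
 * randomized kernel layout.
 */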

#define TCR_SMP_FLAGS   TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_FLAGS TCR_TBI1
#else
#define TCR_KASAN_FLAGS 0
#endif
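/*
 * TCR_TBI1 enables Top Byte Ignore for kernel (TTBR1) addresses, letting
 * software tag-based KASAN keep its pointer tags in bits [63:56].
 */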

#define MAIR(attr, mt)  ((attr) << ((mt) * 8))
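/*
 * Each MAIR_EL1 attribute is one byte, indexed by memory type. For
 * example, MAIR(0x44, MT_NORMAL_NC) with MT_NORMAL_NC == 3 places the
 * Normal Non-cacheable attribute 0x44 in bits [31:24] (see the table
 * in __cpu_setup below).
 */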

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 */
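/*
 * The context area pointed to by x0 corresponds to the ctx_regs[]
 * array of struct cpu_suspend_ctx (asm/suspend.h): twelve system
 * registers stored pairwise at offsets 0-88, reloaded from the same
 * offsets by cpu_do_resume below.
 */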
ENTRY(cpu_do_suspend)
        mrs     x2, tpidr_el0
        mrs     x3, tpidrro_el0
        mrs     x4, contextidr_el1
        mrs     x5, osdlr_el1
        mrs     x6, cpacr_el1
        mrs     x7, tcr_el1
        mrs     x8, vbar_el1
        mrs     x9, mdscr_el1
        mrs     x10, oslsr_el1
        mrs     x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     x12, tpidr_el1
alternative_else
        mrs     x12, tpidr_el2
alternative_endif
        mrs     x13, sp_el0
        stp     x2, x3, [x0]
        stp     x4, x5, [x0, #16]
        stp     x6, x7, [x0, #32]
        stp     x8, x9, [x0, #48]
        stp     x10, x11, [x0, #64]
        stp     x12, x13, [x0, #80]
        ret
ENDPROC(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
        .pushsection ".idmap.text", "awx"
ENTRY(cpu_do_resume)
        ldp     x2, x3, [x0]
        ldp     x4, x5, [x0, #16]
        ldp     x6, x8, [x0, #32]
        ldp     x9, x10, [x0, #48]
        ldp     x11, x12, [x0, #64]
        ldp     x13, x14, [x0, #80]
        msr     tpidr_el0, x2
        msr     tpidrro_el0, x3
        msr     contextidr_el1, x4
        msr     cpacr_el1, x6

        /* Don't change t0sz here, mask those bits when restoring */
        mrs     x7, tcr_el1
        bfi     x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
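        /*
         * The bfi above copies the live T0SZ field (TCR_TxSZ_WIDTH bits
         * at TCR_T0SZ_OFFSET) out of the current TCR into the value
         * being restored, so the T0SZ set up for the idmap is carried
         * over unchanged.
         */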

        msr     tcr_el1, x8
        msr     vbar_el1, x9

        /*
         * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
         * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
         * exception. Mask them until local_daif_restore() in cpu_suspend()
         * resets them.
         */
        disable_daif
        msr     mdscr_el1, x10

        msr     sctlr_el1, x12
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        msr     tpidr_el1, x13
alternative_else
        msr     tpidr_el2, x13
alternative_endif
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 (the OS lock state): write the saved OSLK
         * bit to oslar_el1.
         */
        msr     osdlr_el1, x5
        ubfx    x11, x11, #1, #1
        msr     oslar_el1, x11
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
        msr_s   SYS_DISR_EL1, xzr
alternative_else_nop_endif

        isb
        ret
ENDPROC(cpu_do_resume)
        .popsection
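/*
 * cpu_do_resume sits in .idmap.text (mapped at the same VA and PA), so
 * the early resume path can reach it while running from the identity
 * map, before the normal kernel mappings are usable.
 */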
#endif

/*
 *      cpu_do_switch_mm(pgd_phys, tsk)
 *
 *      Set the translation table base pointer to be pgd_phys.
 *
 *      - pgd_phys - physical address of new TTB
 */
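/*
 * The 16-bit ASID is inserted into TTBR1_EL1[63:48]; since TCR_EL1.A1
 * is set in __cpu_setup, the ASID is taken from TTBR1. Installing the
 * new ASID before the new TTBR0 (with an ISB in between) ensures that
 * speculative walks of the incoming tables are never tagged with the
 * outgoing ASID.
 */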
ENTRY(cpu_do_switch_mm)
        mrs     x2, ttbr1_el1
        mmid    x1, x1                          // get mm->context.id
        phys_to_ttbr x3, x0

alternative_if ARM64_HAS_CNP
        cbz     x1, 1f                          // skip CNP for reserved ASID
        orr     x3, x3, #TTBR_CNP_BIT
1:
alternative_else_nop_endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        bfi     x3, x1, #48, #16                // set the ASID field in TTBR0
#endif
        bfi     x2, x1, #48, #16                // set the ASID
        msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
        isb
        msr     ttbr0_el1, x3                   // now update TTBR0
        isb
        b       post_ttbr_update_workaround     // Back to C code...
ENDPROC(cpu_do_switch_mm)

        .pushsection ".idmap.text", "awx"

.macro  __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
        adrp    \tmp1, empty_zero_page
        phys_to_ttbr \tmp2, \tmp1
        offset_ttbr1 \tmp2, \tmp1
        msr     ttbr1_el1, \tmp2
        isb
        tlbi    vmalle1
        dsb     nsh
        isb
.endm
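/*
 * Pointing TTBR1 at the (all-invalid) empty_zero_page guarantees that
 * no kernel-side translation table walks can allocate TLB entries
 * while swapper is being switched or rewritten; the local TLBI then
 * removes any entries the old tables left behind.
 */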

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
ENTRY(idmap_cpu_replace_ttbr1)
        save_and_disable_daif flags=x2

        __idmap_cpu_set_reserved_ttbr1 x1, x3

        offset_ttbr1 x0, x3
        msr     ttbr1_el1, x0
        isb

        restore_daif x2

        ret
ENDPROC(idmap_cpu_replace_ttbr1)
        .popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        .pushsection ".idmap.text", "awx"

        .macro  __idmap_kpti_get_pgtable_ent, type
        dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
        dmb     sy                              // lines are written back before
        ldr     \type, [cur_\()\type\()p]       // loading the entry
        tbz     \type, #0, skip_\()\type        // Skip invalid and
        tbnz    \type, #11, skip_\()\type       // non-global entries
        .endm

        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
        str     \type, [cur_\()\type\()p]       // Update the entry and ensure
        dmb     sy                              // that it is visible to all
        dc      civac, cur_\()\type\()p         // CPUs.
        .endm
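/*
 * The walker runs with the MMU off, so loads and stores to the tables
 * bypass the caches: each entry is cleaned to the PoC before it is
 * read (dc cvac) and cleaned+invalidated after it is written back
 * (dc civac). Bit 0 is the valid bit and bit 11 is nG, which sits in
 * the same position in both block and page descriptors.
 */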

/*
 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
__idmap_kpti_flag:
        .long   1
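/*
 * Synchronization protocol: the flag starts at 1 (counting the boot
 * CPU). Each secondary atomically increments it once it has switched
 * to the reserved TTBR1; the boot CPU waits for it to reach num_cpus
 * before editing swapper, and writes 0 to release the secondaries.
 */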
ENTRY(idmap_kpti_install_ng_mappings)
        cpu             .req    w0
        num_cpus        .req    w1
        swapper_pa      .req    x2
        swapper_ttb     .req    x3
        flag_ptr        .req    x4
        cur_pgdp        .req    x5
        end_pgdp        .req    x6
        pgd             .req    x7
        cur_pudp        .req    x8
        end_pudp        .req    x9
        pud             .req    x10
        cur_pmdp        .req    x11
        end_pmdp        .req    x12
        pmd             .req    x13
        cur_ptep        .req    x14
        end_ptep        .req    x15
        pte             .req    x16

        mrs     swapper_ttb, ttbr1_el1
        restore_ttbr1   swapper_ttb
        adr     flag_ptr, __idmap_kpti_flag

        cbnz    cpu, __idmap_kpti_secondary

        /* We're the boot CPU. Wait for the others to catch up */
        sevl
1:      wfe
        ldaxr   w18, [flag_ptr]
        eor     w18, w18, num_cpus
        cbnz    w18, 1b

        /* We need to walk swapper, so turn off the MMU. */
        pre_disable_mmu_workaround
        mrs     x18, sctlr_el1
        bic     x18, x18, #SCTLR_ELx_M
        msr     sctlr_el1, x18
        isb

        /* Everybody is enjoying the idmap, so we can rewrite swapper. */
        /* PGD */
        mov     cur_pgdp, swapper_pa
        add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd: __idmap_kpti_get_pgtable_ent    pgd
        tbnz    pgd, #1, walk_puds
next_pgd:
        __idmap_kpti_put_pgtable_ent_ng pgd
skip_pgd:
        add     cur_pgdp, cur_pgdp, #8
        cmp     cur_pgdp, end_pgdp
        b.ne    do_pgd

        /* Publish the updated tables and nuke all the TLBs */
        dsb     sy
        tlbi    vmalle1is
        dsb     ish
        isb

        /* We're done: fire up the MMU again */
        mrs     x18, sctlr_el1
        orr     x18, x18, #SCTLR_ELx_M
        msr     sctlr_el1, x18
        isb

        /*
         * Invalidate the local I-cache so that any instructions fetched
         * speculatively from the PoC are discarded, since they may have
         * been dynamically patched at the PoU.
         */
        ic      iallu
        dsb     nsh
        isb

        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]
        ret

        /* PUD */
walk_puds:
        .if CONFIG_PGTABLE_LEVELS > 3
        pte_to_phys     cur_pudp, pgd
        add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud: __idmap_kpti_get_pgtable_ent    pud
        tbnz    pud, #1, walk_pmds
next_pud:
        __idmap_kpti_put_pgtable_ent_ng pud
skip_pud:
        add     cur_pudp, cur_pudp, #8
        cmp     cur_pudp, end_pudp
        b.ne    do_pud
        b       next_pgd
        .else /* CONFIG_PGTABLE_LEVELS <= 3 */
        mov     pud, pgd
        b       walk_pmds
next_pud:
        b       next_pgd
        .endif

        /* PMD */
walk_pmds:
        .if CONFIG_PGTABLE_LEVELS > 2
        pte_to_phys     cur_pmdp, pud
        add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd: __idmap_kpti_get_pgtable_ent    pmd
        tbnz    pmd, #1, walk_ptes
next_pmd:
        __idmap_kpti_put_pgtable_ent_ng pmd
skip_pmd:
        add     cur_pmdp, cur_pmdp, #8
        cmp     cur_pmdp, end_pmdp
        b.ne    do_pmd
        b       next_pud
        .else /* CONFIG_PGTABLE_LEVELS <= 2 */
        mov     pmd, pud
        b       walk_ptes
next_pmd:
        b       next_pud
        .endif

        /* PTE */
walk_ptes:
        pte_to_phys     cur_ptep, pmd
        add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte: __idmap_kpti_get_pgtable_ent    pte
        __idmap_kpti_put_pgtable_ent_ng pte
skip_pte:
        add     cur_ptep, cur_ptep, #8
        cmp     cur_ptep, end_ptep
        b.ne    do_pte
        b       next_pmd

        /* Secondary CPUs end up here */
__idmap_kpti_secondary:
        /* Uninstall swapper before surgery begins */
        __idmap_cpu_set_reserved_ttbr1 x18, x17

        /* Increment the flag to let the boot CPU know we're ready */
1:      ldxr    w18, [flag_ptr]
        add     w18, w18, #1
        stxr    w17, w18, [flag_ptr]
        cbnz    w17, 1b

        /* Wait for the boot CPU to finish messing around with swapper */
        sevl
1:      wfe
        ldxr    w18, [flag_ptr]
        cbnz    w18, 1b

        /* All done, act like nothing happened */
        offset_ttbr1 swapper_ttb, x18
        msr     ttbr1_el1, swapper_ttb
        isb
        ret

        .unreq  cpu
        .unreq  num_cpus
        .unreq  swapper_pa
        .unreq  swapper_ttb
        .unreq  flag_ptr
        .unreq  cur_pgdp
        .unreq  end_pgdp
        .unreq  pgd
        .unreq  cur_pudp
        .unreq  end_pudp
        .unreq  pud
        .unreq  cur_pmdp
        .unreq  end_pmdp
        .unreq  pmd
        .unreq  cur_ptep
        .unreq  end_ptep
        .unreq  pte
ENDPROC(idmap_kpti_install_ng_mappings)
        .popsection
#endif

/*
 *      __cpu_setup
 *
 *      Initialise the processor for turning the MMU on.  Return in x0 the
 *      value of the SCTLR_EL1 register.
 */
        .pushsection ".idmap.text", "awx"
ENTRY(__cpu_setup)
        tlbi    vmalle1                         // Invalidate local TLB
        dsb     nsh

        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
        mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
        msr     mdscr_el1, x0                   // access to the DCC from EL0
        isb                                     // Unmask debug exceptions now,
        enable_dbg                              // since this is per-cpu
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        /*
         * Memory region attributes for LPAE:
         *
         *   n = AttrIndx[2:0]
         *                      n       MAIR
         *   DEVICE_nGnRnE      000     00000000
         *   DEVICE_nGnRE       001     00000100
         *   DEVICE_GRE         010     00001100
         *   NORMAL_NC          011     01000100
         *   NORMAL             100     11111111
         *   NORMAL_WT          101     10111011
         */
        ldr     x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
                     MAIR(0x04, MT_DEVICE_nGnRE) | \
                     MAIR(0x0c, MT_DEVICE_GRE) | \
                     MAIR(0x44, MT_NORMAL_NC) | \
                     MAIR(0xff, MT_NORMAL) | \
                     MAIR(0xbb, MT_NORMAL_WT)
        msr     mair_el1, x5
        /*
         * Prepare SCTLR
         */
        mov_q   x0, SCTLR_EL1_SET
        /*
         * Set/prepare TCR and TTBR. Both the user and kernel address
         * ranges are sized by TCR_TxSZ(VA_BITS), e.g. 512GB for a
         * 39-bit VA configuration.
         */
        ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
                        TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
                        TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
        tcr_clear_errata_bits x10, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
        ldr_l           x9, vabits_actual
        sub             x9, xzr, x9
        add             x9, x9, #64
        tcr_set_t1sz    x10, x9
#else
        ldr_l           x9, idmap_t0sz
#endif
        tcr_set_t0sz    x10, x9
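        /*
         * TxSZ encodes a region size of 2^(64 - TxSZ) bytes. With
         * 52-bit VAs the negate-and-add sequence above computes
         * x9 = 64 - vabits_actual (e.g. 16 when the hardware only
         * supports 48-bit VAs) and applies it to both T1SZ and T0SZ;
         * otherwise T0SZ comes from the precomputed idmap_t0sz.
         */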

        /*
         * Set the IPS bits in TCR_EL1.
         */
        tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
        /*
         * Enable hardware update of the Access Flags bit.
         * Hardware dirty bit management is enabled later,
         * via capabilities.
         */
        mrs     x9, ID_AA64MMFR1_EL1
        and     x9, x9, #0xf
        cbz     x9, 1f
        orr     x10, x10, #TCR_HA               // hardware Access flag update
1:
#endif  /* CONFIG_ARM64_HW_AFDBM */
        msr     tcr_el1, x10
        ret                                     // return to head.S
ENDPROC(__cpu_setup)
