root/arch/arm64/kernel/hibernate-asm.S

/* [<][>][^][v][top][bottom][index][help] */
   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 /*
   3  * Hibernate low-level support
   4  *
   5  * Copyright (C) 2016 ARM Ltd.
   6  * Author:      James Morse <james.morse@arm.com>
   7  */
   8 #include <linux/linkage.h>
   9 #include <linux/errno.h>
  10 
  11 #include <asm/asm-offsets.h>
  12 #include <asm/assembler.h>
  13 #include <asm/cputype.h>
  14 #include <asm/memory.h>
  15 #include <asm/page.h>
  16 #include <asm/virt.h>
  17 
  18 /*
  19  * To prevent the possibility of old and new partial table walks being visible
  20  * in the tlb, switch the ttbr to a zero page when we invalidate the old
  21  * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
  22  * Even switching to our copied tables will cause a changed output address at
  23  * each stage of the walk.
  24  */
   25 .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
        /*
         * zero_page:  phys addr of a page that remains zero (dummy ttbr1 target)
         * page_table: phys addr of the page tables to install in ttbr1
         * tmp, tmp2:  scratch registers, clobbered
         */
   26         phys_to_ttbr \tmp, \zero_page
   27         msr     ttbr1_el1, \tmp         // "break": walks now see only zeroes
   28         isb
   29         tlbi    vmalle1                 // discard any stale EL1 TLB entries
   30         dsb     nsh                     // ...and wait for the invalidate to finish
   31         phys_to_ttbr \tmp, \page_table
   32         offset_ttbr1 \tmp, \tmp2        // apply ttbr offset for 52-bit VA configs, if any
   33         msr     ttbr1_el1, \tmp         // "make": install the new tables
   34         isb
   35 .endm
  36 
  37 
  38 /*
  39  * Resume from hibernate
  40  *
  41  * Loads temporary page tables then restores the memory image.
  42  * Finally branches to cpu_resume() to restore the state saved by
  43  * swsusp_arch_suspend().
  44  *
  45  * Because this code has to be copied to a 'safe' page, it can't call out to
  46  * other functions by PC-relative address. Also remember that it may be
  47  * mid-way through over-writing other functions. For this reason it contains
  48  * code from flush_icache_range() and uses the copy_page() macro.
  49  *
  50  * This 'safe' page is mapped via ttbr0, and executed from there. This function
  51  * switches to a copy of the linear map in ttbr1, performs the restore, then
  52  * switches ttbr1 to the original kernel's swapper_pg_dir.
  53  *
  54  * All of memory gets written to, including code. We need to clean the kernel
  55  * text to the Point of Coherence (PoC) before secondary cores can be booted.
  56  * Because the kernel modules and executable pages mapped to user space are
  57  * also written as data, we clean all pages we touch to the Point of
  58  * Unification (PoU).
  59  *
  60  * x0: physical address of temporary page tables
  61  * x1: physical address of swapper page tables
  62  * x2: address of cpu_resume
  63  * x3: linear map address of restore_pblist in the current kernel
  64  * x4: physical address of __hyp_stub_vectors, or 0
  65  * x5: physical address of a  zero page that remains zero after resume
  66  */
   67 .pushsection    ".hibernate_exit.text", "ax"
   68 ENTRY(swsusp_arch_suspend_exit)
   69         /*
   70          * We execute from ttbr0, change ttbr1 to our copied linear map tables
   71          * with a break-before-make via the zero page
   72          */
   73         break_before_make_ttbr_switch   x5, x0, x6, x8
   74 
        /*
         * Stash the arguments in registers that copy_page and the cache
         * maintenance loop below (x0-x10, x19) do not use:
         *   x21 = phys addr of swapper page tables (for the final switch)
         *   x30 = cpu_resume; the final 'ret' branches there
         *   x24 = phys addr of __hyp_stub_vectors, or 0 (consumed by el1_sync)
         *   x25 = phys addr of the zero page (reused for the second switch)
         */
   75         mov     x21, x1
   76         mov     x30, x2
   77         mov     x24, x4
   78         mov     x25, x5
   79 
   80         /* walk the restore_pblist and use copy_page() to over-write memory */
   81         mov     x19, x3                 // x19 = current pbe entry
   82 
   83 1:      ldr     x10, [x19, #HIBERN_PBE_ORIG]    // x10 = destination page
   84         mov     x0, x10
   85         ldr     x1, [x19, #HIBERN_PBE_ADDR]     // x1 = source copy of the page
   86 
   87         copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
   88 
   89         add     x1, x10, #PAGE_SIZE             // x1 = end of destination page
   90         /* Clean the copied page to PoU - based on flush_icache_range() */
   91         raw_dcache_line_size x2, x3             // x2 = D-cache line size
   92         sub     x3, x2, #1                      // x3 = line-size mask
   93         bic     x4, x10, x3                     // x4 = line-aligned cursor
   94 2:      dc      cvau, x4        /* clean D line / unified line */
   95         add     x4, x4, x2
   96         cmp     x4, x1
   97         b.lo    2b
   98 
   99         ldr     x19, [x19, #HIBERN_PBE_NEXT]    // advance to next pbe
  100         cbnz    x19, 1b
  101         dsb     ish             /* wait for PoU cleaning to finish */
  102 
  103         /* switch to the restored kernels page tables */
  104         break_before_make_ttbr_switch   x25, x21, x6, x8
  105 
        /* We rewrote kernel text as data: invalidate all I-caches (inner
         * shareable) before executing any restored code. */
  106         ic      ialluis
  107         dsb     ish
  108         isb
  109 
  110         cbz     x24, 3f         /* Do we need to re-initialise EL2? */
  111         hvc     #0              // taken to el1_sync via hibernate_el2_vectors
  112 3:      ret                     // x30 = cpu_resume, set above
  113 
  114         .ltorg
  115 ENDPROC(swsusp_arch_suspend_exit)
 116 
  117 /*
  118  * Restore the hyp stub.
  119  * This must be done before the hibernate page is unmapped by _cpu_resume(),
  120  * but happens before any of the hyp-stub's code is cleaned to PoC.
  121  *
  122  * x24: The physical address of __hyp_stub_vectors
  123  */
  124 el1_sync:
        /* Runs at EL2: reached by the 'hvc #0' in swsusp_arch_suspend_exit
         * while hibernate_el2_vectors is the active EL2 vector table. */
  125         msr     vbar_el2, x24           // put the hyp stub vectors back
  126         eret                            // return to EL1, after the hvc
  127 ENDPROC(el1_sync)
 128 
  129 .macro invalid_vector   label
        /* Emit a vector entry that spins forever: these exception types are
         * never expected while the temporary EL2 vectors are installed. */
  130 \label:
  131         b \label
  132 ENDPROC(\label)
  133 .endm
  134 
        /* One self-branching stub per unused vector table entry below. */
  135         invalid_vector  el2_sync_invalid
  136         invalid_vector  el2_irq_invalid
  137         invalid_vector  el2_fiq_invalid
  138         invalid_vector  el2_error_invalid
  139         invalid_vector  el1_sync_invalid
  140         invalid_vector  el1_irq_invalid
  141         invalid_vector  el1_fiq_invalid
  142         invalid_vector  el1_error_invalid
 143 
  144 /* el2 vectors - switch el2 here while we restore the memory image. */
        /* 2KB alignment (.align 11) as VBAR_EL2 requires; entry order and
         * spacing are fixed by the architecture's vector table layout.
         * Only the 64-bit EL1 synchronous entry (el1_sync, reached by the
         * 'hvc #0' in swsusp_arch_suspend_exit) is ever expected to fire. */
  145         .align 11
  146 ENTRY(hibernate_el2_vectors)
  147         ventry  el2_sync_invalid                // Synchronous EL2t
  148         ventry  el2_irq_invalid                 // IRQ EL2t
  149         ventry  el2_fiq_invalid                 // FIQ EL2t
  150         ventry  el2_error_invalid               // Error EL2t
  151 
  152         ventry  el2_sync_invalid                // Synchronous EL2h
  153         ventry  el2_irq_invalid                 // IRQ EL2h
  154         ventry  el2_fiq_invalid                 // FIQ EL2h
  155         ventry  el2_error_invalid               // Error EL2h
  156 
  157         ventry  el1_sync                        // Synchronous 64-bit EL1
  158         ventry  el1_irq_invalid                 // IRQ 64-bit EL1
  159         ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
  160         ventry  el1_error_invalid               // Error 64-bit EL1
  161 
  162         ventry  el1_sync_invalid                // Synchronous 32-bit EL1
  163         ventry  el1_irq_invalid                 // IRQ 32-bit EL1
  164         ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
  165         ventry  el1_error_invalid               // Error 32-bit EL1
  166 END(hibernate_el2_vectors)
 167 
 168 .popsection

/* [<][>][^][v][top][bottom][index][help] */