arch/arm64/kvm/hyp/entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

#define CPU_GP_REG_OFFSET(x)    (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)      CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

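/*
 * CPU_XREG_OFFSET(n) is the byte offset of general-purpose register xn
 * within struct kvm_cpu_context: the asm-offsets constants locate the
 * user_pt_regs area inside the context, and each x register is 8 bytes
 * wide, so xn sits at CPU_GP_REGS + CPU_USER_PT_REGS + 8*n.
 */
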
        .text
        .pushsection    .hyp.text, "ax"

.macro save_callee_saved_regs ctxt
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
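
/*
 * Note that x18 is deliberately absent from the pairs above: these macros
 * only cover x19-x29 and lr. The guest's x18 is loaded/stored on its own
 * in __guest_enter/__guest_exit, since x18 doubles as the guest context
 * pointer on the entry path.
 */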

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *                   struct kvm_cpu_context *host_ctxt);
 */
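/*
 * A rough sketch of the C-side caller (the world-switch loop in
 * hyp/switch.c), assuming the usual run-loop shape:
 *
 *      do {
 *              exit_code = __guest_enter(vcpu, host_ctxt);
 *      } while (fixup_guest_exit(vcpu, &exit_code));
 *
 * The value returned here (and via __guest_exit below) is one of the
 * ARM_EXCEPTION_* codes, possibly with ARM_EXIT_WITH_SERROR_BIT ORed in.
 */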
ENTRY(__guest_enter)
        // x0: vcpu
        // x1: host context
        // x2-x17: clobbered by macros
        // x18: guest context

        // Store the host regs
        save_callee_saved_regs x1

        // Now that the host regs are stored, a pending RAS SError must
        // affect the host rather than the guest. If any asynchronous
        // exception is pending we defer the guest entry. The DSB isn't
        // necessary before v8.2 as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
        dsb     nshst
        isb
alternative_else_nop_endif
        mrs     x1, isr_el1
        cbz     x1,  1f
        mov     x0, #ARM_EXCEPTION_IRQ
        ret
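        // (Returning ARM_EXCEPTION_IRQ here bails out before any guest
        // state has been touched, so the host can deal with the pending
        // asynchronous exception first.)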

1:
        add     x18, x0, #VCPU_CONTEXT

        // Macro ptrauth_switch_to_guest format:
        //      ptrauth_switch_to_guest(guest ctxt, tmp1, tmp2, tmp3)
        // The macro below, which restores the guest keys, is not implemented
        // in C code as doing so may cause Pointer Authentication key signing
        // mismatch errors when this feature is enabled for kernel code.
        ptrauth_switch_to_guest x18, x0, x1, x2

        // Restore guest regs x0-x17
        ldp     x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
        ldp     x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
        ldp     x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
        ldp     x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
        ldp     x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
        ldp     x10, x11, [x18, #CPU_XREG_OFFSET(10)]
        ldp     x12, x13, [x18, #CPU_XREG_OFFSET(12)]
        ldp     x14, x15, [x18, #CPU_XREG_OFFSET(14)]
        ldp     x16, x17, [x18, #CPU_XREG_OFFSET(16)]

        // Restore guest regs x19-x29, lr
        restore_callee_saved_regs x18

        // Restore guest reg x18
        ldr     x18,      [x18, #CPU_XREG_OFFSET(18)]

        // Do not touch any register after this!
        eret
        sb
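        // The speculation barrier above keeps the CPU from speculatively
        // executing the host-side code below the eret.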
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
        // x0: return code
        // x1: vcpu
        // x2-x29,lr: vcpu regs
        // vcpu x0-x1 on the stack
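        // (The EL2 exception vectors in hyp-entry.S push the guest's x0/x1
        // onto the stack and load the exit code and vcpu pointer before
        // branching here.)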

        add     x1, x1, #VCPU_CONTEXT

        ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
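        // (Set PSTATE.PAN for the host straight away when the CPU
        // implements PAN.)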

        // Store the guest regs x2 and x3
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

        // Retrieve the guest regs x0-x1 from the stack
        ldp     x2, x3, [sp], #16       // x0, x1

        // Store the guest regs x0-x1 and x4-x18
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
        stp     x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
        stp     x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
        stp     x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
        stp     x10, x11, [x1, #CPU_XREG_OFFSET(10)]
        stp     x12, x13, [x1, #CPU_XREG_OFFSET(12)]
        stp     x14, x15, [x1, #CPU_XREG_OFFSET(14)]
        stp     x16, x17, [x1, #CPU_XREG_OFFSET(16)]
        str     x18,      [x1, #CPU_XREG_OFFSET(18)]

        // Store the guest regs x19-x29, lr
        save_callee_saved_regs x1

        get_host_ctxt   x2, x3
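        // (get_host_ctxt leaves a pointer to this CPU's host
        // kvm_cpu_context in x2; x3 is only used as a scratch register.)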

        // Macro ptrauth_switch_to_host format:
        //      ptrauth_switch_to_host(guest ctxt, host ctxt, tmp1, tmp2, tmp3)
        // The macro below, which saves the guest keys and restores the host
        // keys, is not implemented in C code as doing so may cause Pointer
        // Authentication key signing mismatch errors when this feature is
        // enabled for kernel code.
        ptrauth_switch_to_host x1, x2, x3, x4, x5

        // Now restore the host regs
        restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
        // If we have the RAS extensions we can consume a pending error
        // without having to unmask SError and issue an isb. The ESB
        // instruction consumed any pending guest error when we took the
        // exception from the guest.
        mrs_s   x2, SYS_DISR_EL1
        str     x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
        cbz     x2, 1f
        msr_s   SYS_DISR_EL1, xzr
        orr     x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:      ret
alternative_else
        dsb     sy              // Synchronize against in-flight ld/st
        isb                     // Prevent an early read of side-effect free ISR
        mrs     x2, isr_el1
        tbnz    x2, #8, 2f      // ISR_EL1.A
        ret
        nop
2:
alternative_endif
        // We know we have a pending asynchronous abort; now is the
        // time to flush it out. From your VAXorcist book, page 666:
        // "Threaten me not, oh Evil one!  For I speak with
        // the power of DEC, and I command thee to show thyself!"
        mrs     x2, elr_el2
        mrs     x3, esr_el2
        mrs     x4, spsr_el2
        mov     x5, x0

        msr     daifclr, #4     // Unmask aborts

        // This is our single instruction exception window. A pending
        // SError is guaranteed to occur at the earliest when we unmask
        // it, and at the latest just after the ISB.
        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
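        // (If the SError fires in the window above, the EL2 SError vector
        // in hyp-entry.S checks ELR_EL2 against abort_guest_exit_start/end
        // and returns with ARM_EXIT_WITH_SERROR_BIT set in x0.)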

        msr     daifset, #4     // Mask aborts

        // If the SError took place it has clobbered ELR_EL2, ESR_EL2 and
        // SPSR_EL2 on its way in; restore the values saved above, which
        // describe the original exit from the guest, so that we can still
        // report some information about it.
        // Merge the exception code with the SError pending bit.
        tbz     x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
        msr     elr_el2, x2
        msr     esr_el2, x3
        msr     spsr_el2, x4
        orr     x0, x0, x5
1:      ret
ENDPROC(__guest_exit)
