arch/arm/kvm/hyp/entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

        .arch_extension     virt

        .text
        .pushsection    .hyp.text, "ax"
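
/*
 * Offset of the usr-mode GP register bank within struct kvm_cpu_context,
 * built from the asm-offsets constants.
 */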
#define USR_REGS_OFFSET         (CPU_CTXT_GP_REGS + GP_REGS_USR)

/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
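/*
 * On entry (AAPCS): r0 = vcpu pointer, r1 = host cpu context.
 * The "return" happens through __guest_exit, which restores the host
 * registers saved here and leaves the exception code in r0.
 */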
ENTRY(__guest_enter)
        @ Save host registers
        add     r1, r1, #(USR_REGS_OFFSET + S_R4)
        stm     r1!, {r4-r12}
        str     lr, [r1, #4]    @ Skip SP_usr (already saved)

        @ Restore guest registers
        add     r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
        ldr     lr, [r0, #S_LR]
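        @ lr is loaded first: the ldm below also loads r0 and so destroys
        @ the pointer into the guest context.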
        ldm     r0, {r0-r12}

        clrex
        eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
        /*
         * return convention:
         * guest r0, r1, r2 saved on the stack
         * r0: vcpu pointer
         * r1: exception code
         */
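
        @ Save the guest's r3-r12 and lr into the guest context (SP_usr is
        @ skipped, as in __guest_enter), then copy the stacked r0-r2 in.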
        add     r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
        stm     r2!, {r3-r12}
        str     lr, [r2, #4]
        add     r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
        pop     {r3, r4, r5}            @ r0, r1, r2
        stm     r2, {r3-r5}
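
        @ The guest's r0-r12 and lr are now saved; reload the host context
        @ pointer and restore the registers __guest_enter saved.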
        ldr     r0, [r0, #VCPU_HOST_CTXT]
        add     r0, r0, #(USR_REGS_OFFSET + S_R4)
        ldm     r0!, {r4-r12}
        ldr     lr, [r0, #4]
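
        @ Return the exception code in r0, and snapshot SPSR, ELR_hyp and
        @ HSR: an abort taken in the window below clobbers them, in which
        @ case they are written back before returning.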
        mov     r0, r1
        mrs     r1, SPSR
        mrs     r2, ELR_hyp
        mrc     p15, 4, r3, c5, c2, 0   @ HSR

        /*
         * Force loads and stores to complete before unmasking aborts
         * and forcing the delivery of the exception. This gives us a
         * single instruction window, which the handler will try to
         * match.
         */
        dsb     sy
        cpsie   a

        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
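
        /*
         * An abort taken in this window is expected to be caught by the
         * hyp abort vector (see hyp-entry.S), which matches its return
         * address against the two labels above and sets bit 31 of the
         * exception code.
         */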

        /*
         * If we took an abort, r0[31] will be set, and cmp will set
         * the N bit in PSTATE.
         */
        cmp     r0, #0
        msrmi   SPSR_cxsf, r1
        msrmi   ELR_hyp, r2
        mcrmi   p15, 4, r3, c5, c2, 0   @ HSR

        bx      lr
ENDPROC(__guest_exit)

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall
 * back to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
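/*
 * Called from the hyp trap handling code when the guest touches the
 * VFP/NEON registers while cp10/cp11 accesses are trapped by HCPTR.
 * On entry r0 is the vcpu pointer and the guest's r0-r2 are still on
 * the stack.
 */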
ENTRY(__vfp_guest_restore)
        push    {r3, r4, lr}

        @ NEON/VFP used.  Turn on VFP access.
        mrc     p15, 4, r1, c1, c1, 2           @ HCPTR
        bic     r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
        mcr     p15, 4, r1, c1, c1, 2           @ HCPTR
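        @ Ensure the HCPTR update has taken effect before any VFP/NEON
        @ register is touched below.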
        isb

        @ Switch VFP/NEON hardware state to the guest's
        mov     r4, r0
        ldr     r0, [r0, #VCPU_HOST_CTXT]
        add     r0, r0, #CPU_CTXT_VFP
        bl      __vfp_save_state
        add     r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
        bl      __vfp_restore_state
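
        @ Restore the registers pushed at entry, then the guest's r0-r2
        @ (pushed by the hyp trap code), and return straight to the guest.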
        pop     {r3, r4, lr}
        pop     {r0, r1, r2}
        clrex
        eret
ENDPROC(__vfp_guest_restore)
#endif

        .popsection
