root/arch/arm64/kvm/hyp/sysreg-sr.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. __sysreg_save_common_state
  2. __sysreg_save_user_state
  3. __sysreg_save_el1_state
  4. __sysreg_save_el2_return_state
  5. __sysreg_save_state_nvhe
  6. sysreg_save_host_state_vhe
  7. sysreg_save_guest_state_vhe
  8. __sysreg_restore_common_state
  9. __sysreg_restore_user_state
  10. __sysreg_restore_el1_state
  11. __sysreg_restore_el2_return_state
  12. __sysreg_restore_state_nvhe
  13. sysreg_restore_host_state_vhe
  14. sysreg_restore_guest_state_vhe
  15. __sysreg32_save_state
  16. __sysreg32_restore_state
  17. kvm_vcpu_load_sysregs
  18. kvm_vcpu_put_sysregs
  19. __kvm_enable_ssbs

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2012-2015 - ARM Ltd
   4  * Author: Marc Zyngier <marc.zyngier@arm.com>
   5  */
   6 
   7 #include <linux/compiler.h>
   8 #include <linux/kvm_host.h>
   9 
  10 #include <asm/kprobes.h>
  11 #include <asm/kvm_asm.h>
  12 #include <asm/kvm_emulate.h>
  13 #include <asm/kvm_hyp.h>
  14 
  15 /*
  16  * Non-VHE: Both host and guest must save everything.
  17  *
  18  * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
  19  * which are handled as part of the el2 return state) on every switch.
  20  * tpidr_el0 and tpidrro_el0 only need to be switched when going
  21  * to host userspace or a different VCPU.  EL1 registers only need to be
  22  * switched when potentially going to run a different VCPU.  The latter two
  23  * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
  24  */
  25 
      /*
       * Save the state that must be context-switched on every guest
       * entry/exit on both VHE and non-VHE: the debug control register
       * and the EL0 stack pointer.
       */
   26 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
   27 {
   28         ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);
   29 
   30         /*
   31          * The host arm64 Linux uses sp_el0 to point to 'current' and it must
   32          * therefore be saved/restored on every entry/exit to/from the guest.
   33          */
   34         ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
   35 }
  36 
      /*
       * Save the EL0 thread-pointer registers.  Per the comment at the top
       * of this file, on VHE these only need switching when going to host
       * userspace or to a different VCPU (vcpu_load/vcpu_put time).
       */
   37 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
   38 {
   39         ctxt->sys_regs[TPIDR_EL0]       = read_sysreg(tpidr_el0);
   40         ctxt->sys_regs[TPIDRRO_EL0]     = read_sysreg(tpidrro_el0);
   41 }
  42 
      /*
       * Save the guest-visible EL1 context: MMU/control/fault registers,
       * plus the EL1 stack pointer, exception link register and saved
       * program status register.
       */
   43 static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
   44 {
   45         ctxt->sys_regs[CSSELR_EL1]      = read_sysreg(csselr_el1);
   46         ctxt->sys_regs[SCTLR_EL1]       = read_sysreg_el1(SYS_SCTLR);
   47         ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
   48         ctxt->sys_regs[CPACR_EL1]       = read_sysreg_el1(SYS_CPACR);
   49         ctxt->sys_regs[TTBR0_EL1]       = read_sysreg_el1(SYS_TTBR0);
   50         ctxt->sys_regs[TTBR1_EL1]       = read_sysreg_el1(SYS_TTBR1);
   51         ctxt->sys_regs[TCR_EL1]         = read_sysreg_el1(SYS_TCR);
   52         ctxt->sys_regs[ESR_EL1]         = read_sysreg_el1(SYS_ESR);
   53         ctxt->sys_regs[AFSR0_EL1]       = read_sysreg_el1(SYS_AFSR0);
   54         ctxt->sys_regs[AFSR1_EL1]       = read_sysreg_el1(SYS_AFSR1);
   55         ctxt->sys_regs[FAR_EL1]         = read_sysreg_el1(SYS_FAR);
   56         ctxt->sys_regs[MAIR_EL1]        = read_sysreg_el1(SYS_MAIR);
   57         ctxt->sys_regs[VBAR_EL1]        = read_sysreg_el1(SYS_VBAR);
   58         ctxt->sys_regs[CONTEXTIDR_EL1]  = read_sysreg_el1(SYS_CONTEXTIDR);
   59         ctxt->sys_regs[AMAIR_EL1]       = read_sysreg_el1(SYS_AMAIR);
   60         ctxt->sys_regs[CNTKCTL_EL1]     = read_sysreg_el1(SYS_CNTKCTL);
   61         ctxt->sys_regs[PAR_EL1]         = read_sysreg(par_el1);
   62         ctxt->sys_regs[TPIDR_EL1]       = read_sysreg(tpidr_el1);
   63 
   64         ctxt->gp_regs.sp_el1            = read_sysreg(sp_el1);
   65         ctxt->gp_regs.elr_el1           = read_sysreg_el1(SYS_ELR);
   66         ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
   67 }
  68 
      /*
       * Save the EL2 return state: the guest's PC and pstate as captured
       * in ELR_EL2/SPSR_EL2 on the exception into the hypervisor.  With
       * the RAS extension, also snapshot the virtual deferred interrupt
       * status (VDISR_EL2) into the guest's DISR_EL1.
       */
   69 static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
   70 {
   71         ctxt->gp_regs.regs.pc           = read_sysreg_el2(SYS_ELR);
   72         ctxt->gp_regs.regs.pstate       = read_sysreg_el2(SYS_SPSR);
   73 
   74         if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
   75                 ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
   76 }
  77 
      /* Non-VHE: the whole sysreg context is saved on every world switch. */
   78 void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
   79 {
   80         __sysreg_save_el1_state(ctxt);
   81         __sysreg_save_common_state(ctxt);
   82         __sysreg_save_user_state(ctxt);
   83         __sysreg_save_el2_return_state(ctxt);
   84 }
  85 
      /* VHE: only the per-switch common state needs saving for the host. */
   86 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
   87 {
   88         __sysreg_save_common_state(ctxt);
   89 }
      /* Must not be kprobed: called from the VHE world-switch path. */
   90 NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
  91 
      /*
       * VHE: save the guest's per-switch common state plus the EL2 return
       * state (PC/pstate); EL1/user state is deferred to vcpu_put.
       */
   92 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
   93 {
   94         __sysreg_save_common_state(ctxt);
   95         __sysreg_save_el2_return_state(ctxt);
   96 }
      /* Must not be kprobed: called from the VHE world-switch path. */
   97 NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
  98 
      /*
       * Restore the per-switch common state: the debug control register
       * and the EL0 stack pointer.  Mirror of __sysreg_save_common_state().
       */
   99 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
  100 {
  101         write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);
  102 
  103         /*
  104          * The host arm64 Linux uses sp_el0 to point to 'current' and it must
  105          * therefore be saved/restored on every entry/exit to/from the guest.
  106          */
  107         write_sysreg(ctxt->gp_regs.regs.sp,       sp_el0);
  108 }
 109 
      /* Restore the EL0 thread-pointer registers saved by __sysreg_save_user_state(). */
  110 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
  111 {
  112         write_sysreg(ctxt->sys_regs[TPIDR_EL0],         tpidr_el0);
  113         write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],       tpidrro_el0);
  114 }
 115 
      /*
       * Restore the guest's EL1 context.  Note the guest's MPIDR_EL1 value
       * is installed via VMPIDR_EL2, which supplies the value the guest
       * observes when it reads MPIDR_EL1.
       */
  116 static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
  117 {
  118         write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
  119         write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
  120         write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     SYS_SCTLR);
  121         write_sysreg(ctxt->sys_regs[ACTLR_EL1],         actlr_el1);
  122         write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],     SYS_CPACR);
  123         write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],     SYS_TTBR0);
  124         write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],     SYS_TTBR1);
  125         write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       SYS_TCR);
  126         write_sysreg_el1(ctxt->sys_regs[ESR_EL1],       SYS_ESR);
  127         write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],     SYS_AFSR0);
  128         write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],     SYS_AFSR1);
  129         write_sysreg_el1(ctxt->sys_regs[FAR_EL1],       SYS_FAR);
  130         write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],      SYS_MAIR);
  131         write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],      SYS_VBAR);
  132         write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
  133         write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],     SYS_AMAIR);
  134         write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],   SYS_CNTKCTL);
  135         write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
  136         write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);
  137 
  138         write_sysreg(ctxt->gp_regs.sp_el1,              sp_el1);
  139         write_sysreg_el1(ctxt->gp_regs.elr_el1,         SYS_ELR);
  140         write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
  141 }
 142 
      /*
       * Restore the EL2 return state (guest PC and pstate into
       * ELR_EL2/SPSR_EL2), sanitising the pstate so the subsequent ERET
       * cannot enter the guest at EL2 or higher.
       */
  143 static void __hyp_text
  144 __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
  145 {
  146         u64 pstate = ctxt->gp_regs.regs.pstate;
  147         u64 mode = pstate & PSR_AA32_MODE_MASK;
  148 
  149         /*
  150          * Safety check to ensure we're setting the CPU up to enter the guest
  151          * in a less privileged mode.
  152          *
  153          * If we are attempting a return to EL2 or higher in AArch64 state,
  154          * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
  155          * we'll take an illegal exception state exception immediately after
  156          * the ERET to the guest.  Attempts to return to AArch32 Hyp will
  157          * result in an illegal exception return because EL2's execution state
  158          * is determined by SCR_EL3.RW.
  159          */
  160         if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
  161                 pstate = PSR_MODE_EL2h | PSR_IL_BIT;
  162 
  163         write_sysreg_el2(ctxt->gp_regs.regs.pc,         SYS_ELR);
  164         write_sysreg_el2(pstate,                        SYS_SPSR);
  165 
              /* RAS: make the saved deferred interrupt status visible as VDISR_EL2. */
  166         if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
  167                 write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
  168 }
 169 
      /* Non-VHE: the whole sysreg context is restored on every world switch. */
  170 void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
  171 {
  172         __sysreg_restore_el1_state(ctxt);
  173         __sysreg_restore_common_state(ctxt);
  174         __sysreg_restore_user_state(ctxt);
  175         __sysreg_restore_el2_return_state(ctxt);
  176 }
 177 
      /* VHE: only the per-switch common state needs restoring for the host. */
  178 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
  179 {
  180         __sysreg_restore_common_state(ctxt);
  181 }
      /* Must not be kprobed: called from the VHE world-switch path. */
  182 NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
 183 
      /*
       * VHE: restore the guest's per-switch common state plus the EL2
       * return state; EL1/user state was already loaded at vcpu_load.
       */
  184 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
  185 {
  186         __sysreg_restore_common_state(ctxt);
  187         __sysreg_restore_el2_return_state(ctxt);
  188 }
      /* Must not be kprobed: called from the VHE world-switch path. */
  189 NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 190 
      /*
       * Save the AArch32 banked SPSRs and the 32-bit-only system registers.
       * A no-op unless the vcpu runs a 32-bit EL1.
       */
  191 void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
  192 {
  193         u64 *spsr, *sysreg;
  194 
  195         if (!vcpu_el1_is_32bit(vcpu))
  196                 return;
  197 
  198         spsr = vcpu->arch.ctxt.gp_regs.spsr;
  199         sysreg = vcpu->arch.ctxt.sys_regs;
  200 
  201         spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
  202         spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
  203         spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
  204         spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
  205 
  206         sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
  207         sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
  208 
              /* DBGVCR32_EL2 is only switched on VHE or when guest debug is live. */
  209         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
  210                 sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
  211 }
 212 
      /*
       * Restore the AArch32 banked SPSRs and the 32-bit-only system
       * registers.  A no-op unless the vcpu runs a 32-bit EL1.
       */
  213 void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
  214 {
  215         u64 *spsr, *sysreg;
  216 
  217         if (!vcpu_el1_is_32bit(vcpu))
  218                 return;
  219 
  220         spsr = vcpu->arch.ctxt.gp_regs.spsr;
  221         sysreg = vcpu->arch.ctxt.sys_regs;
  222 
  223         write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
  224         write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
  225         write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
  226         write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
  227 
  228         write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
  229         write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
  230 
              /* DBGVCR32_EL2 is only switched on VHE or when guest debug is live. */
  231         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
  232                 write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
  233 }
 234 
 235 /**
 236  * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 237  *
 238  * @vcpu: The VCPU pointer
 239  *
 240  * Load system registers that do not affect the host's execution, for
 241  * example EL1 system registers on a VHE system where the host kernel
 242  * runs at EL2.  This function is called from KVM's vcpu_load() function
 243  * and loading system register state early avoids having to load them on
 244  * every entry to the VM.
 245  */
  246 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
  247 {
  248         struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
  249         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
  250 
              /* Deferred sysreg loading only applies to VHE systems. */
  251         if (!has_vhe())
  252                 return;
  253 
              /* Preserve the host's EL0 thread registers before clobbering them. */
  254         __sysreg_save_user_state(host_ctxt);
  255 
  256         /*
  257          * Load guest EL1 and user state
  258          *
  259          * We must restore the 32-bit state before the sysregs, thanks
  260          * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
  261          */
  262         __sysreg32_restore_state(vcpu);
  263         __sysreg_restore_user_state(guest_ctxt);
  264         __sysreg_restore_el1_state(guest_ctxt);
  265 
              /* Tell the sysreg accessors to read/write hardware, not memory. */
  266         vcpu->arch.sysregs_loaded_on_cpu = true;
  267 
  268         activate_traps_vhe_load(vcpu);
  269 }
 270 
 271 /**
 272  * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 273  *
 274  * @vcpu: The VCPU pointer
 275  *
 276  * Save guest system registers that do not affect the host's execution, for
 277  * example EL1 system registers on a VHE system where the host kernel
 278  * runs at EL2.  This function is called from KVM's vcpu_put() function
 279  * and deferring saving system register state until we're no longer running the
 280  * VCPU avoids having to save them on every exit from the VM.
 281  */
  282 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
  283 {
  284         struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
  285         struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
  286 
              /* Deferred sysreg saving only applies to VHE systems. */
  287         if (!has_vhe())
  288                 return;
  289 
  290         deactivate_traps_vhe_put();
  291 
              /* Capture the guest's state off the hardware before the host reuses it. */
  292         __sysreg_save_el1_state(guest_ctxt);
  293         __sysreg_save_user_state(guest_ctxt);
  294         __sysreg32_save_state(vcpu);
  295 
  296         /* Restore host user state */
  297         __sysreg_restore_user_state(host_ctxt);
  298 
              /* Sysreg accessors must now go via the in-memory context again. */
  299         vcpu->arch.sysregs_loaded_on_cpu = false;
  300 }
 301 
      /*
       * Read-modify-write SCTLR_EL2 to set the DSSBS bit via inline asm
       * (runs at EL2, where SCTLR_EL2 is directly accessible).
       */
  302 void __hyp_text __kvm_enable_ssbs(void)
  303 {
  304         u64 tmp;
  305 
  306         asm volatile(
  307         "mrs    %0, sctlr_el2\n"
  308         "orr    %0, %0, %1\n"
  309         "msr    sctlr_el2, %0"
  310         : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
  311 }

/* [<][>][^][v][top][bottom][index][help] */