arch/powerpc/kvm/tm.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Derived from book3s_hv_rmhandlers.S, which is:
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/*
 * Save transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 containing the MSR with the current TS bits
 *      (for HV KVM this is VCPU_MSR; for PR KVM it is the host MSR)
 * - r5 containing a flag indicating that non-volatile registers
 *      must be preserved.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.  If r5 != 0, this restores the
 * MSR TM/FP/VEC/VSX bits to their state on entry.
 */
_GLOBAL(__kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	mr	r9, r3
	cmpdi	cr7, r5, 0

	/* Turn on TM, plus FP/VEC/VSX so their state can be saved. */
	mfmsr	r8
	mr	r10, r8
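	/*
	 * The rldimi below rotates the 1 in r0 left by MSR_TM_LG and
	 * inserts it under a single-bit mask, so only MSR[TM] is set;
	 * the rest of the MSR image in r8 is left untouched.
	 */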
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	ori	r8, r8, MSR_FP
	oris	r8, r8, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r8

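	/*
	 * Rotate MSR[TS] down into the low two bits of r4 and clear
	 * everything above them; TS == 0b00 means the guest was not in a
	 * transaction, so there is no checkpointed state to reclaim.
	 */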
	rldicl.	r4, r4, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_SCRATCH2(r13)
	std	r3, HSTATE_SCRATCH1(r13)

	/* Save CR on the stack - even if r5 == 0 we need cr7 back. */
	mfcr	r6
	SAVE_GPR(6, r1)

	/* Save DSCR so we can restore it, to avoid running with the user value. */
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(7, r1)

	/*
	 * We are going to execute treclaim., which will modify all
	 * checkpointed registers.  Save the non-volatile registers on the
	 * stack if preservation of non-volatile state has been requested.
	 */
	beq	cr7, 3f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
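	/*
	 * The rldimi below inserts the zero in r0 under the two-bit TS
	 * mask (bits MSR_TS_S_LG..MSR_TS_T_LG), clearing both TS bits in
	 * the MSR image the caller will get back.
	 */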
	li	r0, 0
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear MSR RI since r1 and r13 are about to be clobbered. */
	li	r5, 0
	mtmsrd	r5, 1

	li	r3, TM_CAUSE_KVM_RESCHED

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)
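	/*
	 * treclaim. has discarded the transactional register values and
	 * put the checkpointed (pre-transaction) values back into the
	 * registers, recording our failure cause (r3) in TEXASR.  What we
	 * store below is therefore the guest's checkpointed state.
	 */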

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_SCRATCH1(r13)

	/* Save away PPR soon so we don't run with the user value. */
	std	r0, VCPU_GPRS_TM(0)(r9)
	mfspr	r0, SPRN_PPR
	HMT_MEDIUM

	/* Reload our stack pointer. */
	std	r1, VCPU_GPRS_TM(1)(r9)
	ld	r1, HSTATE_SCRATCH2(r13)

	/* Set MSR RI now that we have r1 and r13 back. */
	std	r2, VCPU_GPRS_TM(2)(r9)
	li	r2, MSR_RI
	mtmsrd	r2, 1

	/* Reload TOC pointer. */
	ld	r2, PACATOC(r13)

	/* Save all GPRs except r0-r2, r9 and r13 */
	reg = 3
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
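	/*
	 * The .rept/.endr block above expands at assembly time into 27
	 * discrete std instructions; "reg" is an assembler symbol, not a
	 * register, so nothing extra is clobbered at run time.
	 */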
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Restore host DSCR and CR values, after saving the guest values */
	mfcr	r6
	mfspr	r7, SPRN_DSCR
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_DSCR_TM(r9)
	REST_GPR(6, r1)
	REST_GPR(7, r1)
	mtcr	r6
	mtspr	SPRN_DSCR, r7

	/* Save away the checkpointed SPRs. */
	std	r0, VCPU_PPR_TM(r9)
	mflr	r5
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)

	/* Restore the non-volatile registers if requested. */
	beq	cr7, 1f
	REST_NVGPRS(r1)
	REST_GPR(10, r1)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the
	 * software error code is recorded correctly in TEXASR.  Also the
	 * user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	/* Restore the MSR state if requested. */
	beq	cr7, 2f
	mtmsrd	r10, 0
2:
	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it
 * can be invoked from C.  It is used by PR KVM only.
 */
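/*
 * The matching C declaration is expected to look like (assuming the
 * prototype in arch/powerpc/include/asm/kvm_book3s.h):
 *
 *	void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 *
 * with the vcpu arriving in r3 and the MSR image in r4, matching the
 * register interface of __kvmppc_save_tm above.
 */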
_GLOBAL(_kvmppc_save_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	mfspr	r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1		/* preserve non-volatile registers */
	bl	__kvmppc_save_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr	SPRN_TAR, r8

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);

/*
 * Restore transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct.
 * - r4 containing the guest MSR with the desired TS bits
 *      (for HV KVM this is VCPU_MSR; for PR KVM it is provided by the caller).
 * - r5 containing a flag indicating that non-volatile registers
 *      must be preserved.
 * If r5 == 0, this potentially modifies all checkpointed registers, but
 * restores r1 and r2 from the PACA before exit.
 * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
 */
_GLOBAL(__kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	cmpdi	cr7, r5, 0

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	mr	r10, r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
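	/*
	 * The li/sldi/or sequence above builds the 64-bit MSR_TM constant
	 * and ORs it into the MSR image; it has the same effect as the
	 * rldimi idiom used in __kvmppc_save_tm.
	 */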
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	mr	r5, r4
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	9f		/* TM not active in guest */

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this might
	 * not have been set on a kvmppc_set_one_reg() call, but we
	 * shouldn't let that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * Make a stack frame and save non-volatile registers if requested.
	 */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)
	std	r1, HSTATE_SCRATCH2(r13)

	mfcr	r6
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(2, r1)
	SAVE_GPR(6, r1)
	SAVE_GPR(7, r1)

	beq	cr7, 4f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 0b01 (suspended) once we do trechkpt. */
	li	r0, 1
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
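	/*
	 * The rldimi above set the suspended bit (MSR_TS_S_LG) under the
	 * two-bit TS mask: TS = 0b01 means suspended, which is the state
	 * the final MSR must carry after the checkpoint is reinstated.
	 */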
4:
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early, as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r3
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r3, r31
	lwz	r7, VCPU_VRSAVE_TM(r3)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r3)
	lwz	r6, VCPU_CR_TM(r3)
	ld	r7, VCPU_CTR_TM(r3)
	ld	r8, VCPU_AMR_TM(r3)
	ld	r9, VCPU_TAR_TM(r3)
	ld	r10, VCPU_XER_TM(r3)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up the PPR and DSCR values, but don't put them in the actual
	 * SPRs till the last moment, to avoid running with userspace PPR
	 * and DSCR for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r3)
	ld	r30, VCPU_PPR_TM(r3)

	/* Clear MSR RI since r1 and r13 are about to be clobbered. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT
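	/*
	 * trechkpt has copied the register values we just loaded into the
	 * thread's checkpointed state; the guest will see them as its
	 * transactional checkpoint once its MSR[TS] bits are restored.
	 */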

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r1, HSTATE_SCRATCH2(r13)
	REST_GPR(7, r1)
	mtspr	SPRN_DSCR, r7

	/* Set MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Restore TOC pointer and CR */
	REST_GPR(2, r1)
	REST_GPR(6, r1)
	mtcr	r6

	/* Restore the non-volatile registers if requested. */
	beq	cr7, 5f
	REST_GPR(10, r1)
	REST_NVGPRS(r1)

5:	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

9:	/* Restore MSR bits if requested */
	beqlr	cr7
	mtmsrd	r10, 0
	blr

/*
 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so
 * that it can be invoked from C.  It is used by PR KVM only.
 */
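/*
 * The matching C declaration is expected to be (again assuming the
 * prototype in arch/powerpc/include/asm/kvm_book3s.h):
 *
 *	void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 */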
_GLOBAL(_kvmppc_restore_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* save TAR so that it can be recovered later */
	mfspr	r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1
	bl	__kvmppc_restore_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr	SPRN_TAR, r8

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
