root/arch/powerpc/kvm/book3s_segment.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)    \
        mr      reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)                            \
        tophys(reg, r2);                        \
        lwz     reg, (THREAD + THREAD_KVM_SVCPU)(reg);  \
        tophys(reg, reg)

#endif
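
/* On 64-bit the shadow vcpu lives inside the PACA, so r13 already points
 * at it; on 32-bit it has to be fetched from the thread struct and
 * converted to a physical address, since this code runs with the MMU off. */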

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

        /* Required state:
         *
         * MSR = ~(IR|DR)  (translation off)
         * R1 = host R1
         * R2 = host R2
         * R4 = guest shadow MSR
         * R5 = normal host MSR
         * R6 = current host MSR (EE, IR, DR off)
         * LR = highmem guest exit code
         * all other volatile GPRS = free
         * SVCPU[CR] = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR] = guest LR
         */
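
        /* All of the above is set up by the caller before it branches here
         * with translation disabled; this trampoline only stashes the host
         * context, switches the MMU over to the guest segments, loads the
         * guest GPRs and rfi's into the guest. */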

        /* r3 = shadow vcpu */
        GET_SHADOW_VCPU(r3)

        /* Save guest exit handler address and MSR */
        mflr    r0
        PPC_STL r0, HSTATE_VMHANDLER(r3)
        PPC_STL r5, HSTATE_HOST_MSR(r3)

        /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
        PPC_STL r1, HSTATE_HOST_R1(r3)
        PPC_STL r2, HSTATE_HOST_R2(r3)

        /* Activate guest mode, so faults get handled by KVM */
        li      r11, KVM_GUEST_MODE_GUEST
        stb     r11, HSTATE_IN_GUEST(r3)

        /* Switch to guest segment. This is subarch specific. */
        LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
        /* Save host FSCR */
        mfspr   r8, SPRN_FSCR
        std     r8, HSTATE_HOST_FSCR(r13)
        /* Set FSCR during guest execution */
        ld      r9, SVCPU_SHADOW_FSCR(r13)
        mtspr   SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching the guest's instructions
         * to trap on dcbz and emulate it in the hypervisor.
         *
         * If we can, we should tell the CPU to use 32 byte dcbz though,
         * because that's a lot faster.
         */
        lbz     r0, HSTATE_RESTORE_HID5(r3)
        cmpwi   r0, 0
        beq     no_dcbz32_on

        mfspr   r0,SPRN_HID5
        ori     r0, r0, 0x80            /* XXX HID5_dcbz32 = 0x80 */
        mtspr   SPRN_HID5,r0
no_dcbz32_on:
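
        /* HID5_dcbz32 (0x80) makes dcbz clear only 32 bytes instead of the
         * full cache line, which gives such guests the behaviour they expect
         * without trapping every dcbz.  The exit path below clears the bit
         * again before returning to the host. */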

#endif /* CONFIG_PPC_BOOK3S_64 */

        /* Enter guest */

        PPC_LL  r8, SVCPU_CTR(r3)
        PPC_LL  r9, SVCPU_LR(r3)
        lwz     r10, SVCPU_CR(r3)
        PPC_LL  r11, SVCPU_XER(r3)

        mtctr   r8
        mtlr    r9
        mtcr    r10
        mtxer   r11

        /* Move SRR0 and SRR1 into the respective regs */
        PPC_LL  r9, SVCPU_PC(r3)
        /* First clear RI in our current MSR value */
        li      r0, MSR_RI
        andc    r6, r6, r0
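        /* With RI clear, any exception taken from here until the rfi would
         * be unrecoverable: SRR0/SRR1 are about to be loaded with the guest
         * PC and shadow MSR, so they no longer describe a restartable host
         * state. */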

        PPC_LL  r0, SVCPU_R0(r3)
        PPC_LL  r1, SVCPU_R1(r3)
        PPC_LL  r2, SVCPU_R2(r3)
        PPC_LL  r5, SVCPU_R5(r3)
        PPC_LL  r7, SVCPU_R7(r3)
        PPC_LL  r8, SVCPU_R8(r3)
        PPC_LL  r10, SVCPU_R10(r3)
        PPC_LL  r11, SVCPU_R11(r3)
        PPC_LL  r12, SVCPU_R12(r3)
        PPC_LL  r13, SVCPU_R13(r3)

        MTMSR_EERI(r6)
        mtsrr0  r9
        mtsrr1  r4

        PPC_LL  r4, SVCPU_R4(r3)
        PPC_LL  r6, SVCPU_R6(r3)
        PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r3, (SVCPU_R3)(r3)

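        /* rfi/rfid below loads PC from SRR0 (guest PC) and MSR from SRR1
         * (guest shadow MSR), so this is the instruction that actually
         * drops us into the guest. */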
        RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
        /* 64-bit entry. Register usage at this point:
         *
         * SPRG_SCRATCH0   = guest R13
         * R12             = (guest CR << 32) | exit handler id
         * R13             = PACA
         * HSTATE.SCRATCH0 = guest R12
         * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
         */
#ifdef CONFIG_PPC64
        /* Match 32-bit entry */
#ifdef CONFIG_RELOCATABLE
        std     r9, HSTATE_SCRATCH2(r13)
        ld      r9, HSTATE_SCRATCH1(r13)
        mtctr   r9
        ld      r9, HSTATE_SCRATCH2(r13)
#endif
        rotldi  r12, r12, 32              /* Flip R12 halves for stw */
        stw     r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
        srdi    r12, r12, 32              /* shift trap into low half */
#endif
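        /* On entry R12 = (guest CR << 32) | trap.  rotldi by 32 gives
         * (trap << 32) | CR, so the stw above saves the guest CR to
         * SCRATCH1 and the srdi leaves just the trap number in r12,
         * matching what the 32-bit entry provides.  (If RELOCATABLE,
         * SCRATCH1 held the guest CTR, which is moved back into CTR
         * first so the slot can be reused for CR.) */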

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
        /* Register usage at this point:
         *
         * SPRG_SCRATCH0   = guest R13
         * R12             = exit handler id
         * R13             = shadow vcpu (32-bit) or PACA (64-bit)
         * HSTATE.SCRATCH0 = guest R12
         * HSTATE.SCRATCH1 = guest CR
         */

        /* Save registers */

        PPC_STL r0, SVCPU_R0(r13)
        PPC_STL r1, SVCPU_R1(r13)
        PPC_STL r2, SVCPU_R2(r13)
        PPC_STL r3, SVCPU_R3(r13)
        PPC_STL r4, SVCPU_R4(r13)
        PPC_STL r5, SVCPU_R5(r13)
        PPC_STL r6, SVCPU_R6(r13)
        PPC_STL r7, SVCPU_R7(r13)
        PPC_STL r8, SVCPU_R8(r13)
        PPC_STL r9, SVCPU_R9(r13)
        PPC_STL r10, SVCPU_R10(r13)
        PPC_STL r11, SVCPU_R11(r13)

        /* Restore R1/R2 so we can handle faults */
        PPC_LL  r1, HSTATE_HOST_R1(r13)
        PPC_LL  r2, HSTATE_HOST_R2(r13)

        /* Save guest PC and MSR */
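        /* On HV-capable CPUs some exits arrive as hypervisor interrupts,
         * flagged by bit 0x2 in the trap number; those deliver the guest
         * PC/MSR in HSRR0/HSRR1 instead of SRR0/SRR1.  The andi. strips
         * that bit so the rest of the code sees the base vector, and cr1
         * remembers which case we are in for the HSRR0/HSRR1 restore near
         * the end of this file. */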
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        andi.   r0, r12, 0x2
        cmpwi   cr1, r0, 0
        beq     1f
        mfspr   r3,SPRN_HSRR0
        mfspr   r4,SPRN_HSRR1
        andi.   r12,r12,0x3ffd
        b       2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:      mfsrr0  r3
        mfsrr1  r4
2:
        PPC_STL r3, SVCPU_PC(r13)
        PPC_STL r4, SVCPU_SHADOW_SRR1(r13)

        /* Get scratch'ed off registers */
        GET_SCRATCH0(r9)
        PPC_LL  r8, HSTATE_SCRATCH0(r13)
        lwz     r7, HSTATE_SCRATCH1(r13)

        PPC_STL r9, SVCPU_R13(r13)
        PPC_STL r8, SVCPU_R12(r13)
        stw     r7, SVCPU_CR(r13)

        /* Save more register state */

        mfxer   r5
        mfdar   r6
        mfdsisr r7
        mfctr   r8
        mflr    r9

        PPC_STL r5, SVCPU_XER(r13)
        PPC_STL r6, SVCPU_FAULT_DAR(r13)
        stw     r7, SVCPU_FAULT_DSISR(r13)
        PPC_STL r8, SVCPU_CTR(r13)
        PPC_STL r9, SVCPU_LR(r13)

        /*
         * In order to easily fetch the last instruction, the one we got
         * the #vmexit at, we exploit the fact that the guest's virtual
         * memory layout is still in place here, so we can simply load
         * from the guest's PC address.
         */

        /* We only load the last instruction when it's safe */
        cmpwi   r12, BOOK3S_INTERRUPT_DATA_STORAGE
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_PROGRAM
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_SYSCALL
        beq     ld_last_prev_inst
        cmpwi   r12, BOOK3S_INTERRUPT_ALIGNMENT
        beq-    ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        cmpwi   r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
        beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
        cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
        beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

        b       no_ld_last_inst

ld_last_prev_inst:
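        /* A syscall exit leaves the guest PC pointing at the instruction
         * after the sc, so step back one instruction to fetch the sc
         * itself. */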
        addi    r3, r3, -4

ld_last_inst:
        /* Save off the guest instruction we're at */

        /* In case lwz faults */
        li      r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

        /* Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP. */
        li      r9, KVM_GUEST_MODE_SKIP
        stb     r9, HSTATE_IN_GUEST(r13)

        /*    1) enable paging for data */
        mfmsr   r9
        ori     r11, r9, MSR_DR                 /* Enable paging for data */
        mtmsr   r11
        sync
        /*    2) fetch the instruction */
        lwz     r0, 0(r3)
        /*    3) disable paging again */
        mtmsr   r9
        sync

#endif
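        /* If the lwz did fault, the host fault handler sees
         * KVM_GUEST_MODE_SKIP, skips over the lwz and leaves r0 holding
         * KVM_INST_FETCH_FAILED, which is what gets recorded below. */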
        stw     r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

        /* Unset guest mode */
        li      r9, KVM_GUEST_MODE_NONE
        stb     r9, HSTATE_IN_GUEST(r13)

        /* Switch back to host MMU */
        LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

        lbz     r5, HSTATE_RESTORE_HID5(r13)
        cmpwi   r5, 0
        beq     no_dcbz32_off

        li      r4, 0
        mfspr   r5,SPRN_HID5
        rldimi  r5,r4,6,56
        mtspr   SPRN_HID5,r5

no_dcbz32_off:

BEGIN_FTR_SECTION
        /* Save guest FSCR on a FAC_UNAVAIL interrupt */
        cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
        bne+    no_fscr_save
        mfspr   r7, SPRN_FSCR
        std     r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
        /* Restore host FSCR */
        ld      r8, HSTATE_HOST_FSCR(r13)
        mtspr   SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

        /*
         * For some interrupts, we need to call the real Linux
         * handler, so it can do work for us. This has to happen
         * as if the interrupt arrived from the kernel though,
         * so let's fake it here where most state is restored.
         *
         * Having set up SRR0/1 with the address where we want
         * to continue with relocation on (potentially in module
         * space), we either just go straight there with rfi[d],
         * or we jump to an interrupt handler if there is an
         * interrupt to be handled first.  In the latter case,
         * the rfi[d] at the end of the interrupt handler will
         * get us back to where we want to continue.
         */

        /* Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
         * R10      = raw exit handler id
         * R12      = exit handler id
         * R13      = shadow vcpu (32-bit) or PACA (64-bit)
         * SVCPU.*  = guest *
         *
         */

        PPC_LL  r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * We don't want the rfi below to change the MSR[TS] bits.
         * The actual TM handling is done in the host, with IR/DR
         * back on, once we reach HSTATE_VMHANDLER.  Since MSR_TM
         * may be set in HOST_MSR, rfid would not suppress a TS
         * change and could even raise an exception, so splice the
         * current TS bits into the target MSR by hand.
         */
        mfmsr   r7
        rldicl  r7, r7, 64 - MSR_TS_S_LG, 62
        rldimi  r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
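        /* Roughly:  r7 = (MSR >> MSR_TS_S_LG) & 3;  the rldimi then
         * overwrites only the TS field of r6, i.e.
         * r6 = (r6 & ~MSR_TS_MASK) | (r7 << MSR_TS_S_LG),
         * so the rfi below keeps the current transaction state. */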
#endif
        PPC_LL  r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        beq     cr1, 1f
        mtspr   SPRN_HSRR1, r6
        mtspr   SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:      /* Restore host msr -> SRR1 */
        mtsrr1  r6
        /* Load highmem handler address */
        mtsrr0  r8

        /* RFI into the highmem handler, or jump to interrupt handler */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        beqa    BOOK3S_INTERRUPT_EXTERNAL
        cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
        beqa    BOOK3S_INTERRUPT_DECREMENTER
        cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
        beqa    BOOK3S_INTERRUPT_PERFMON
        cmpwi   r12, BOOK3S_INTERRUPT_DOORBELL
        beqa    BOOK3S_INTERRUPT_DOORBELL

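        /* The beqa forms use absolute addressing: they branch straight to
         * the interrupt vector at the literal address of the trap number
         * (e.g. 0x500 for external), so the regular Linux handler runs as
         * if the interrupt hit the kernel, and its rfi[d] returns through
         * the SRR0/SRR1 we just set.  If no handler needs to run, fall
         * through and rfi directly to the highmem handler. */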
        RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end:
