arch/powerpc/kvm/book3s_interrupts.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>

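/*
 * FUNC() resolves to the dot-symbol text entry point on the ELFv1 ABI and
 * to the plain symbol name otherwise.  GET_SHADOW_VCPU() locates the shadow
 * vcpu: on 64-bit Book3S it is embedded in the PACA (reached via r13), on
 * 32-bit it is a pointer hanging off the current thread struct.
 */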
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
#define FUNC(name)              name
#else
#define FUNC(name)              GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)    addi    reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)              name
#define GET_SHADOW_VCPU(reg)    lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_XX */

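/*
 * Load the guest's non-volatile GPRs (r14 - r31) from the vcpu struct;
 * the volatile registers travel via the shadow vcpu instead.
 */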
#define VCPU_LOAD_NVGPRS(vcpu) \
        PPC_LL  r14, VCPU_GPR(R14)(vcpu); \
        PPC_LL  r15, VCPU_GPR(R15)(vcpu); \
        PPC_LL  r16, VCPU_GPR(R16)(vcpu); \
        PPC_LL  r17, VCPU_GPR(R17)(vcpu); \
        PPC_LL  r18, VCPU_GPR(R18)(vcpu); \
        PPC_LL  r19, VCPU_GPR(R19)(vcpu); \
        PPC_LL  r20, VCPU_GPR(R20)(vcpu); \
        PPC_LL  r21, VCPU_GPR(R21)(vcpu); \
        PPC_LL  r22, VCPU_GPR(R22)(vcpu); \
        PPC_LL  r23, VCPU_GPR(R23)(vcpu); \
        PPC_LL  r24, VCPU_GPR(R24)(vcpu); \
        PPC_LL  r25, VCPU_GPR(R25)(vcpu); \
        PPC_LL  r26, VCPU_GPR(R26)(vcpu); \
        PPC_LL  r27, VCPU_GPR(R27)(vcpu); \
        PPC_LL  r28, VCPU_GPR(R28)(vcpu); \
        PPC_LL  r29, VCPU_GPR(R29)(vcpu); \
        PPC_LL  r30, VCPU_GPR(R30)(vcpu); \
        PPC_LL  r31, VCPU_GPR(R31)(vcpu); \

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
        /* Write correct stack frame */
        mflr    r0
        PPC_STL r0,PPC_LR_STKOFF(r1)

        /* Save host state to the stack */
        PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

        /* Save r3 (kvm_run) and r4 (vcpu) */
        SAVE_2GPRS(3, r1)

        /* Save non-volatile registers (r14 - r31) */
        SAVE_NVGPRS(r1)

        /* Save CR */
        mfcr    r14
        stw     r14, _CCR(r1)

        /* Save LR */
        PPC_STL r0, _LINK(r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

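/*
 * Re-entry point for subsequent guest entries from kvm_loop_*: the host
 * state saved above is still on the stack, so it is not saved again.
 */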
kvm_start_lightweight:
        /* Copy registers into shadow vcpu so we can access them in real mode */
        mr      r3, r4
        bl      FUNC(kvmppc_copy_to_svcpu)
        nop
        REST_GPR(4, r1)

#ifdef CONFIG_PPC_BOOK3S_64
        /* Get the dcbz32 flag */
        PPC_LL  r3, VCPU_HFLAGS(r4)
        rldicl  r3, r3, 0, 63           /* r3 &= 1 */
        stb     r3, HSTATE_RESTORE_HID5(r13)

        /* Load up guest SPRG3 value, since it's user readable */
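        /*
         * The shared page holds SPRG3 in the guest's endianness, so use a
         * byte-reversed load (ldbrx) whenever that differs from the
         * endianness the host was built for.
         */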
        lwz     r3, VCPU_SHAREDBE(r4)
        cmpwi   r3, 0
        ld      r5, VCPU_SHARED(r4)
        beq     sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
        ld      r3, VCPU_SHARED_SPRG3(r5)
#else
        addi    r5, r5, VCPU_SHARED_SPRG3
        ldbrx   r3, 0, r5
#endif
        b       after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
        ld      r3, VCPU_SHARED_SPRG3(r5)
#else
        addi    r5, r5, VCPU_SHARED_SPRG3
        ldbrx   r3, 0, r5
#endif

after_sprg3_load:
        mtspr   SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

        PPC_LL  r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */

        /* Jump to segment patching handler and into our guest */
        bl      FUNC(kvmppc_entry_trampoline)
        nop

/*
 * This is the handler in module memory. It is jumped to from the
 * lowmem trampoline code, so it is essentially the guest exit code.
 */

        /*
         * Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
         * R12      = exit handler id
         * R13      = PACA
         * SVCPU.*  = guest *
         * MSR.EE   = 1
         *
         */

        PPC_LL  r3, GPR4(r1)            /* vcpu pointer */

        /*
         * kvmppc_copy_from_svcpu can clobber volatile registers, so save
         * the exit handler id to the vcpu and restore it from there later.
         */
        stw     r12, VCPU_TRAP(r3)

        /* Transfer reg values from shadow vcpu back to vcpu struct */

        bl      FUNC(kvmppc_copy_from_svcpu)
        nop

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Reload kernel SPRG3 value.
         * No need to save guest value as usermode can't modify SPRG3.
         */
        ld      r3, PACA_SPRG_VDSO(r13)
        mtspr   SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

        /* R7 = vcpu */
        PPC_LL  r7, GPR4(r1)

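        /* Save the guest's non-volatile registers (r14 - r31) into the vcpu */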
        PPC_STL r14, VCPU_GPR(R14)(r7)
        PPC_STL r15, VCPU_GPR(R15)(r7)
        PPC_STL r16, VCPU_GPR(R16)(r7)
        PPC_STL r17, VCPU_GPR(R17)(r7)
        PPC_STL r18, VCPU_GPR(R18)(r7)
        PPC_STL r19, VCPU_GPR(R19)(r7)
        PPC_STL r20, VCPU_GPR(R20)(r7)
        PPC_STL r21, VCPU_GPR(R21)(r7)
        PPC_STL r22, VCPU_GPR(R22)(r7)
        PPC_STL r23, VCPU_GPR(R23)(r7)
        PPC_STL r24, VCPU_GPR(R24)(r7)
        PPC_STL r25, VCPU_GPR(R25)(r7)
        PPC_STL r26, VCPU_GPR(R26)(r7)
        PPC_STL r27, VCPU_GPR(R27)(r7)
        PPC_STL r28, VCPU_GPR(R28)(r7)
        PPC_STL r29, VCPU_GPR(R29)(r7)
        PPC_STL r30, VCPU_GPR(R30)(r7)
        PPC_STL r31, VCPU_GPR(R31)(r7)

        /* Pass the exit number as 3rd argument to kvmppc_handle_exit_pr */
        lwz     r5, VCPU_TRAP(r7)

        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
        bl      FUNC(kvmppc_handle_exit_pr)

        /* If RESUME_GUEST, get back in the loop */
        cmpwi   r3, RESUME_GUEST
        beq     kvm_loop_lightweight

        cmpwi   r3, RESUME_GUEST_NV
        beq     kvm_loop_heavyweight

kvm_exit_loop:

        PPC_LL  r4, _LINK(r1)
        mtlr    r4

        lwz     r14, _CCR(r1)
        mtcr    r14

        /* Restore non-volatile host registers (r14 - r31) */
        REST_NVGPRS(r1)

        addi    r1, r1, SWITCH_FRAME_SIZE
        blr

kvm_loop_heavyweight:

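        /*
         * RESUME_GUEST_NV: the exit handler changed non-volatile guest
         * state, so reload r14 - r31 from the vcpu before re-entering.
         */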
        PPC_LL  r4, _LINK(r1)
        PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

        /* Load kvm_run and vcpu */
        REST_2GPRS(3, r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight

kvm_loop_lightweight:

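        /*
         * RESUME_GUEST: the handler left the guest's non-volatile state
         * alone, and r14 - r31 are preserved across the C call, so only
         * the vcpu pointer needs reloading.
         */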
        /* We'll need the vcpu pointer */
        REST_GPR(4, r1)

        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight
