arch/x86/kvm/vmx/vmenter.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX        __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX        __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX        __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX        __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP        __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI        __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI        __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8         __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9         __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10        __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11        __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12        __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13        __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14        __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15        __VCPU_REGS_R15 * WORD_SIZE
#endif
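
/*
 * Offset sketch (illustrative; assumes the enum order in asm/kvm_vcpu_regs.h
 * and a 64-bit build where WORD_SIZE == 8):
 *
 *      VCPU_RAX = __VCPU_REGS_RAX * 8 = 0 * 8 =  0
 *      VCPU_RCX = __VCPU_REGS_RCX * 8 = 1 * 8 =  8
 *      VCPU_RDI = __VCPU_REGS_RDI * 8 = 7 * 8 = 56
 *
 * i.e. "mov VCPU_RCX(%_ASM_AX), %_ASM_CX" below loads regs[VCPU_REGS_RCX]
 * when %_ASM_AX holds the @regs pointer.
 */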

        .text

/**
 * vmx_vmenter - VM-Enter the currently loaded VMCS
 *
 * %RFLAGS.ZF:  !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *      %RFLAGS.CF is set on VM-Fail Invalid
 *      %RFLAGS.ZF is set on VM-Fail Valid
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if they
 * VM-Fail, whereas a successful VM-Enter + VM-Exit will jump to vmx_vmexit.
 */
ENTRY(vmx_vmenter)
        /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
        je 2f

1:      vmresume
        ret

2:      vmlaunch
        ret

        /* A faulting VM-Enter is benign only if KVM is mid-reboot. */
3:      cmpb $0, kvm_rebooting
        je 4f
        ret
4:      ud2

        .pushsection .fixup, "ax"
5:      jmp 3b
        .popsection

        _ASM_EXTABLE(1b, 5b)
        _ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)
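
/*
 * Caller-side sketch (illustrative; it mirrors the real sequence in
 * __vmx_vcpu_run below).  The caller primes RFLAGS.ZF from @launched and
 * folds both VM-Fail conditions into a single branch after the call:
 *
 *      cmpb $0, %bl            # ZF = !launched -> VMLAUNCH vs. VMRESUME
 *      call vmx_vmenter
 *      jbe  2f                 # CF=1 (Invalid) or ZF=1 (Valid) => VM-Fail
 */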

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
ENTRY(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
        /* Preserve guest's RAX, it's used to stuff the RSB. */
        push %_ASM_AX

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

        /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
        or $1, %_ASM_AX

        pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
        ret
ENDPROC(vmx_vmexit)
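
/*
 * Setup sketch (an assumption about the C-side constant host state, not part
 * of this file): VM-Exits land at vmx_vmexit because HOST_RIP is programmed
 * with this symbol, e.g.:
 *
 *      vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit);
 */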

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:        struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
 * @regs:       unsigned long * (to guest registers)
 * @launched:   %true if the VMCS has been launched
 *
 * Returns:
 *      0 on VM-Exit, 1 on VM-Fail
 */
ENTRY(__vmx_vcpu_run)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /*
         * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2

        /* Copy @launched to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl

        /* Adjust RSP to account for the CALL to vmx_vmenter(). */
        lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp

        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX

        /*
         * Check if vmlaunch or vmresume is needed; the resulting RFLAGS.ZF
         * is consumed by vmx_vmenter.
         */
        cmpb $0, %bl

        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RBX(%_ASM_AX), %_ASM_BX
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
        mov VCPU_RDX(%_ASM_AX), %_ASM_DX
        mov VCPU_RSI(%_ASM_AX), %_ASM_SI
        mov VCPU_RDI(%_ASM_AX), %_ASM_DI
        mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_AX),  %r8
        mov VCPU_R9 (%_ASM_AX),  %r9
        mov VCPU_R10(%_ASM_AX), %r10
        mov VCPU_R11(%_ASM_AX), %r11
        mov VCPU_R12(%_ASM_AX), %r12
        mov VCPU_R13(%_ASM_AX), %r13
        mov VCPU_R14(%_ASM_AX), %r14
        mov VCPU_R15(%_ASM_AX), %r15
#endif
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Enter guest mode */
        call vmx_vmenter

        /* Jump on VM-Fail. */
        jbe 2f

        /* Temporarily save guest's RAX. */
        push %_ASM_AX

        /* Reload @regs to RAX. */
        mov WORD_SIZE(%_ASM_SP), %_ASM_AX

        /* Save all guest registers, including RAX from the stack */
        __ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
        mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
        mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
        mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
        mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
        xor %eax, %eax

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RAX are exempt as RSP is restored by hardware during
         * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
         */
1:      xor %ebx, %ebx
        xor %ecx, %ecx
        xor %edx, %edx
        xor %esi, %esi
        xor %edi, %edi
        xor %ebp, %ebp
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "POP" @regs. */
        add $WORD_SIZE, %_ASM_SP
        pop %_ASM_BX

#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        ret

        /* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:      mov $1, %eax
        jmp 1b
ENDPROC(__vmx_vcpu_run)
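
/*
 * C-side usage sketch (an assumption about the caller in vmx.c, shown only
 * for illustration; parameter names follow the kernel-doc above):
 *
 *      vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
 *                                 vmx->loaded_vmcs->launched);
 */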

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:      VMCS field encoding that failed
 * @fault:      %true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
ENTRY(vmread_error_trampoline)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP

        /*
         * With the frame set up, 2*WORD_SIZE(%_ASM_BP) holds @field and
         * 3*WORD_SIZE(%_ASM_BP) holds @fault (both passed on the stack).
         */
        push %_ASM_AX
        push %_ASM_CX
        push %_ASM_DX
#ifdef CONFIG_X86_64
        push %rdi
        push %rsi
        push %r8
        push %r9
        push %r10
        push %r11
#endif
#ifdef CONFIG_X86_64
        /* Load @field and @fault to arg1 and arg2 respectively. */
        mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
        mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
        /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
        push 3*WORD_SIZE(%ebp)
        push 2*WORD_SIZE(%ebp)
#endif

        call vmread_error

#ifndef CONFIG_X86_64
        /* Drop the two arguments pushed for the 32-bit call. */
        add $8, %esp
#endif

        /* Zero out @fault, which will be popped into the result register. */
        _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rsi
        pop %rdi
#endif
        pop %_ASM_DX
        pop %_ASM_CX
        pop %_ASM_AX
        pop %_ASM_BP

        ret
ENDPROC(vmread_error_trampoline)
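
/*
 * Caller-side sketch (illustrative only; the real inline asm lives in the
 * VMREAD wrappers).  The failing VMREAD pushes @fault and @field, bounces
 * through the trampoline, then unwinds both slots -- the zeroed @fault slot
 * is popped as the "read" value:
 *
 *      push $0                         # @fault ($1 from the fault fixup path)
 *      push %[field]                   # @field
 *      call vmread_error_trampoline
 *      pop  %[field]                   # discard @field
 *      pop  %[value]                   # @fault was zeroed above -> value = 0
 */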
