Lines matching refs:vcpu

471 	struct kvm_vcpu       vcpu;  member
561 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() argument
563 return container_of(vcpu, struct vcpu_vmx, vcpu); in to_vmx()
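The to_vmx() helper matched above recovers the enclosing vcpu_vmx from the embedded kvm_vcpu (the vcpu member at line 471) via container_of(). A minimal standalone sketch of that pattern, using mock struct names rather than the real KVM definitions:

#include <stdio.h>
#include <stddef.h>

/* container_of(): given a pointer to a member, recover the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mock_kvm_vcpu { int vcpu_id; };

struct mock_vcpu_vmx {
	unsigned long rflags;
	struct mock_kvm_vcpu vcpu;	/* embedded, like 'struct kvm_vcpu vcpu' at line 471 */
};

static struct mock_vcpu_vmx *to_vmx(struct mock_kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct mock_vcpu_vmx, vcpu);
}

int main(void)
{
	struct mock_vcpu_vmx vmx = { .rflags = 0x2, .vcpu = { .vcpu_id = 7 } };
	struct mock_kvm_vcpu *vcpu = &vmx.vcpu;	/* callers normally only see this pointer */

	printf("recovered rflags: %#lx\n", to_vmx(vcpu)->rflags);
	return 0;
}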
782 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) in get_vmcs12() argument
784 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
787 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) in nested_get_page() argument
789 struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); in nested_get_page()
806 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
814 static void vmx_set_segment(struct kvm_vcpu *vcpu,
816 static void vmx_get_segment(struct kvm_vcpu *vcpu,
818 static bool guest_state_valid(struct kvm_vcpu *vcpu);
820 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
892 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1181 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1184 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1520 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1521 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1565 static void update_exception_bitmap(struct kvm_vcpu *vcpu) in update_exception_bitmap() argument
1571 if ((vcpu->guest_debug & in update_exception_bitmap()
1575 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
1579 if (vcpu->fpu_active) in update_exception_bitmap()
1587 if (is_guest_mode(vcpu)) in update_exception_bitmap()
1588 eb |= get_vmcs12(vcpu)->exception_bitmap; in update_exception_bitmap()
1721 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1755 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1809 static void vmx_save_host_state(struct kvm_vcpu *vcpu) in vmx_save_host_state() argument
1811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state()
1855 if (is_long_mode(&vmx->vcpu)) in vmx_save_host_state()
1871 ++vmx->vcpu.stat.host_state_reload; in __vmx_load_host_state()
1874 if (is_long_mode(&vmx->vcpu)) in __vmx_load_host_state()
1903 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) in __vmx_load_host_state()
1919 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in vmx_vcpu_load() argument
1921 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
1938 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in vmx_vcpu_load()
1967 static void vmx_vcpu_put(struct kvm_vcpu *vcpu) in vmx_vcpu_put() argument
1969 __vmx_load_host_state(to_vmx(vcpu)); in vmx_vcpu_put()
1971 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); in vmx_vcpu_put()
1972 vcpu->cpu = -1; in vmx_vcpu_put()
1977 static void vmx_fpu_activate(struct kvm_vcpu *vcpu) in vmx_fpu_activate() argument
1981 if (vcpu->fpu_active) in vmx_fpu_activate()
1983 vcpu->fpu_active = 1; in vmx_fpu_activate()
1986 cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); in vmx_fpu_activate()
1988 update_exception_bitmap(vcpu); in vmx_fpu_activate()
1989 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; in vmx_fpu_activate()
1990 if (is_guest_mode(vcpu)) in vmx_fpu_activate()
1991 vcpu->arch.cr0_guest_owned_bits &= in vmx_fpu_activate()
1992 ~get_vmcs12(vcpu)->cr0_guest_host_mask; in vmx_fpu_activate()
1993 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_activate()
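The vmx_fpu_activate() lines above juggle which CR0 bits the guest may flip without a VM exit: cr0_guest_owned_bits holds those bits, and the hardware CR0_GUEST_HOST_MASK is programmed with their complement (a set bit means host-intercepted). When nested, any bit L1 intercepts via vmcs12's cr0_guest_host_mask is removed from the guest-owned set. A bitmask sketch of that relationship, with mock constants and names standing in for the real VMCS fields:

#include <stdio.h>

#define X86_CR0_TS 0x8ul	/* task-switched flag, the bit of interest here */

/* Mirror the structure of vmx_fpu_activate(): start from the bits L0 is willing
 * to let the guest own, subtract anything L1 wants intercepted (nested case),
 * and return the complement to program into CR0_GUEST_HOST_MASK. */
static unsigned long cr0_guest_host_mask(int guest_mode,
					 unsigned long l1_cr0_guest_host_mask)
{
	unsigned long guest_owned = X86_CR0_TS;		/* FPU active: guest may toggle TS */

	if (guest_mode)
		guest_owned &= ~l1_cr0_guest_host_mask;	/* L1's intercepts win */

	return ~guest_owned;	/* set bit = intercepted/shadowed, clear bit = guest-owned */
}

int main(void)
{
	printf("not nested:          %#lx\n", cr0_guest_host_mask(0, 0));
	printf("nested, L1 traps TS: %#lx\n", cr0_guest_host_mask(1, X86_CR0_TS));
	return 0;
}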
1996 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2014 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) in vmx_fpu_deactivate() argument
2019 vmx_decache_cr0_guest_bits(vcpu); in vmx_fpu_deactivate()
2021 update_exception_bitmap(vcpu); in vmx_fpu_deactivate()
2022 vcpu->arch.cr0_guest_owned_bits = 0; in vmx_fpu_deactivate()
2023 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_deactivate()
2024 if (is_guest_mode(vcpu)) { in vmx_fpu_deactivate()
2033 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in vmx_fpu_deactivate()
2035 (vcpu->arch.cr0 & X86_CR0_TS); in vmx_fpu_deactivate()
2038 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); in vmx_fpu_deactivate()
2041 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) in vmx_get_rflags() argument
2045 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { in vmx_get_rflags()
2046 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_get_rflags()
2048 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
2050 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
2053 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
2055 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
2058 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in vmx_set_rflags() argument
2060 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_set_rflags()
2061 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
2062 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
2063 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
2069 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) in vmx_get_interrupt_shadow() argument
2082 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) in vmx_set_interrupt_shadow() argument
2098 static void skip_emulated_instruction(struct kvm_vcpu *vcpu) in skip_emulated_instruction() argument
2102 rip = kvm_rip_read(vcpu); in skip_emulated_instruction()
2104 kvm_rip_write(vcpu, rip); in skip_emulated_instruction()
2107 vmx_set_interrupt_shadow(vcpu, 0); in skip_emulated_instruction()
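The skip_emulated_instruction() lines above read the guest RIP, write it back advanced past the instruction that caused the exit, and clear any interrupt shadow. A hedged standalone sketch of that flow; the mock struct and the assumption that the increment comes from the VM-exit instruction length are stand-ins, since those details do not match "vcpu" and are not shown in this listing:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for VMCS-backed state; the real code goes through the register
 * cache (kvm_rip_read/kvm_rip_write) and vmx_set_interrupt_shadow(). */
struct mock_vcpu {
	uint64_t rip;			/* GUEST_RIP */
	uint32_t exit_instr_len;	/* assumed source of the increment */
	int interrupt_shadow;		/* interruptibility blocking state */
};

static void skip_emulated_instruction(struct mock_vcpu *vcpu)
{
	uint64_t rip = vcpu->rip;		/* rip = kvm_rip_read(vcpu) */
	rip += vcpu->exit_instr_len;		/* advance past the exiting instruction */
	vcpu->rip = rip;			/* kvm_rip_write(vcpu, rip) */
	vcpu->interrupt_shadow = 0;		/* vmx_set_interrupt_shadow(vcpu, 0) */
}

int main(void)
{
	struct mock_vcpu v = { .rip = 0x1000, .exit_instr_len = 3, .interrupt_shadow = 1 };
	skip_emulated_instruction(&v);
	printf("rip=%#llx shadow=%d\n", (unsigned long long)v.rip, v.interrupt_shadow);
	return 0;
}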
2114 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) in nested_vmx_check_exception() argument
2116 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_check_exception()
2121 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in nested_vmx_check_exception()
2127 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, in vmx_queue_exception() argument
2131 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
2134 if (!reinject && is_guest_mode(vcpu) && in vmx_queue_exception()
2135 nested_vmx_check_exception(vcpu, nr)) in vmx_queue_exception()
2146 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_queue_exception()
2147 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) in vmx_queue_exception()
2148 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_queue_exception()
2154 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2184 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) in vmx_set_msr_bitmap() argument
2188 if (is_guest_mode(vcpu)) in vmx_set_msr_bitmap()
2190 else if (irqchip_in_kernel(vcpu->kvm) && in vmx_set_msr_bitmap()
2191 apic_x2apic_mode(vcpu->arch.apic)) { in vmx_set_msr_bitmap()
2192 if (is_long_mode(vcpu)) in vmx_set_msr_bitmap()
2197 if (is_long_mode(vcpu)) in vmx_set_msr_bitmap()
2217 if (is_long_mode(&vmx->vcpu)) { in setup_msrs()
2235 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2246 vmx_set_msr_bitmap(&vmx->vcpu); in setup_msrs()
2266 static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in vmx_read_l1_tsc() argument
2270 tsc_offset = is_guest_mode(vcpu) ? in vmx_read_l1_tsc()
2271 to_vmx(vcpu)->nested.vmcs01_tsc_offset : in vmx_read_l1_tsc()
2280 static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in vmx_set_tsc_khz() argument
2286 vcpu->arch.tsc_catchup = 1; in vmx_set_tsc_khz()
2287 vcpu->arch.tsc_always_catchup = 1; in vmx_set_tsc_khz()
2292 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu) in vmx_read_tsc_offset() argument
2300 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in vmx_write_tsc_offset() argument
2302 if (is_guest_mode(vcpu)) { in vmx_write_tsc_offset()
2310 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; in vmx_write_tsc_offset()
2312 vmcs12 = get_vmcs12(vcpu); in vmx_write_tsc_offset()
2317 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in vmx_write_tsc_offset()
2323 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) in vmx_adjust_tsc_offset() argument
2328 if (is_guest_mode(vcpu)) { in vmx_adjust_tsc_offset()
2330 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; in vmx_adjust_tsc_offset()
2332 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, in vmx_adjust_tsc_offset()
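The TSC lines above (vmx_read_l1_tsc, vmx_write_tsc_offset, vmx_adjust_tsc_offset) keep a separate vmcs01_tsc_offset while a nested L2 guest runs: L1's view of the TSC stays host_tsc plus the vmcs01 offset, while the hardware offset in use composes the vmcs01 offset with whatever L1 programmed for L2. A small arithmetic sketch under that reading (the vmcs12 tsc_offset addition itself is not in this match list, and all field names here are mock stand-ins):

#include <stdio.h>
#include <stdint.h>

struct mock_nested {
	int guest_mode;			/* is_guest_mode(): is L2 currently running? */
	int64_t vmcs01_tsc_offset;	/* offset KVM chose for L1 */
	int64_t vmcs12_tsc_offset;	/* offset L1 programmed for L2 */
};

/* What L1 believes its TSC is, given a raw host TSC reading. */
static uint64_t read_l1_tsc(const struct mock_nested *n, uint64_t host_tsc)
{
	return host_tsc + n->vmcs01_tsc_offset;	/* always the vmcs01 offset */
}

/* What the active hardware TSC offset should hold right now. */
static int64_t active_tsc_offset(const struct mock_nested *n)
{
	return n->guest_mode
		? n->vmcs01_tsc_offset + n->vmcs12_tsc_offset	/* L2: offsets compose */
		: n->vmcs01_tsc_offset;				/* L1: plain offset */
}

int main(void)
{
	struct mock_nested n = { .guest_mode = 1,
				 .vmcs01_tsc_offset = -1000,
				 .vmcs12_tsc_offset = 500 };
	printf("L1 TSC at host 10000: %llu\n",
	       (unsigned long long)read_l1_tsc(&n, 10000));
	printf("hardware offset:      %lld\n", (long long)active_tsc_offset(&n));
	return 0;
}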
2336 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in vmx_compute_tsc_offset() argument
2341 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) in guest_cpuid_has_vmx() argument
2343 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); in guest_cpuid_has_vmx()
2353 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) in nested_vmx_allowed() argument
2355 return nested && guest_cpuid_has_vmx(vcpu); in nested_vmx_allowed()
2398 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) in nested_vmx_setup_ctls_msrs()
2543 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) in vmx_get_vmx_msr() argument
2545 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr()
2643 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) in vmx_get_msr() argument
2662 vmx_load_host_state(to_vmx(vcpu)); in vmx_get_msr()
2663 data = to_vmx(vcpu)->msr_guest_kernel_gs_base; in vmx_get_msr()
2667 return kvm_get_msr_common(vcpu, msr_index, pdata); in vmx_get_msr()
2686 if (!nested_vmx_allowed(vcpu)) in vmx_get_msr()
2688 data = to_vmx(vcpu)->nested.msr_ia32_feature_control; in vmx_get_msr()
2691 if (!nested_vmx_allowed(vcpu)) in vmx_get_msr()
2693 return vmx_get_vmx_msr(vcpu, msr_index, pdata); in vmx_get_msr()
2697 data = vcpu->arch.ia32_xss; in vmx_get_msr()
2700 if (!to_vmx(vcpu)->rdtscp_enabled) in vmx_get_msr()
2704 msr = find_msr_entry(to_vmx(vcpu), msr_index); in vmx_get_msr()
2709 return kvm_get_msr_common(vcpu, msr_index, pdata); in vmx_get_msr()
2716 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
2723 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in vmx_set_msr() argument
2725 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2733 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2764 kvm_write_tsc(vcpu, msr_info); in vmx_set_msr()
2768 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) in vmx_set_msr()
2771 vcpu->arch.pat = data; in vmx_set_msr()
2774 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2777 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2780 if (!nested_vmx_allowed(vcpu) || in vmx_set_msr()
2781 (to_vmx(vcpu)->nested.msr_ia32_feature_control & in vmx_set_msr()
2786 vmx_leave_nested(vcpu); in vmx_set_msr()
2799 vcpu->arch.ia32_xss = data; in vmx_set_msr()
2800 if (vcpu->arch.ia32_xss != host_xss) in vmx_set_msr()
2802 vcpu->arch.ia32_xss, host_xss); in vmx_set_msr()
2828 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2834 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) in vmx_cache_reg() argument
2836 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in vmx_cache_reg()
2839 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2842 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
2846 ept_save_pdptrs(vcpu); in vmx_cache_reg()
3272 static bool emulation_required(struct kvm_vcpu *vcpu) in emulation_required() argument
3274 return emulate_invalid_guest_state && !guest_state_valid(vcpu); in emulation_required()
3277 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, in fix_pmode_seg() argument
3293 vmx_set_segment(vcpu, save, seg); in fix_pmode_seg()
3296 static void enter_pmode(struct kvm_vcpu *vcpu) in enter_pmode() argument
3299 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
3305 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3306 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3307 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3308 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3309 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3310 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3316 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3326 update_exception_bitmap(vcpu); in enter_pmode()
3328 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3329 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
3330 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
3331 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
3332 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
3333 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
3369 static void enter_rmode(struct kvm_vcpu *vcpu) in enter_rmode() argument
3372 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
3374 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
3375 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
3376 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
3377 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
3378 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
3379 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
3380 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
3388 if (!vcpu->kvm->arch.tss_addr) in enter_rmode()
3394 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); in enter_rmode()
3405 update_exception_bitmap(vcpu); in enter_rmode()
3414 kvm_mmu_reset_context(vcpu); in enter_rmode()
3417 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) in vmx_set_efer() argument
3419 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
3429 vmx_load_host_state(to_vmx(vcpu)); in vmx_set_efer()
3430 vcpu->arch.efer = efer; in vmx_set_efer()
3432 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3435 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3444 static void enter_lmode(struct kvm_vcpu *vcpu) in enter_lmode() argument
3448 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
3458 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
3461 static void exit_lmode(struct kvm_vcpu *vcpu) in exit_lmode() argument
3463 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
3464 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
3469 static void vmx_flush_tlb(struct kvm_vcpu *vcpu) in vmx_flush_tlb() argument
3471 vpid_sync_context(to_vmx(vcpu)); in vmx_flush_tlb()
3473 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in vmx_flush_tlb()
3475 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); in vmx_flush_tlb()
3479 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) in vmx_decache_cr0_guest_bits() argument
3481 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3483 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3484 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3487 static void vmx_decache_cr3(struct kvm_vcpu *vcpu) in vmx_decache_cr3() argument
3489 if (enable_ept && is_paging(vcpu)) in vmx_decache_cr3()
3490 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_decache_cr3()
3491 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in vmx_decache_cr3()
3494 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) in vmx_decache_cr4_guest_bits() argument
3496 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3498 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3499 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3502 static void ept_load_pdptrs(struct kvm_vcpu *vcpu) in ept_load_pdptrs() argument
3504 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_load_pdptrs()
3507 (unsigned long *)&vcpu->arch.regs_dirty)) in ept_load_pdptrs()
3510 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { in ept_load_pdptrs()
3518 static void ept_save_pdptrs(struct kvm_vcpu *vcpu) in ept_save_pdptrs() argument
3520 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3522 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { in ept_save_pdptrs()
3530 (unsigned long *)&vcpu->arch.regs_avail); in ept_save_pdptrs()
3532 (unsigned long *)&vcpu->arch.regs_dirty); in ept_save_pdptrs()
3535 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3539 struct kvm_vcpu *vcpu) in ept_update_paging_mode_cr0() argument
3541 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) in ept_update_paging_mode_cr0()
3542 vmx_decache_cr3(vcpu); in ept_update_paging_mode_cr0()
3549 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3550 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); in ept_update_paging_mode_cr0()
3551 } else if (!is_paging(vcpu)) { in ept_update_paging_mode_cr0()
3557 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3558 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); in ept_update_paging_mode_cr0()
3565 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in vmx_set_cr0() argument
3567 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3577 enter_pmode(vcpu); in vmx_set_cr0()
3580 enter_rmode(vcpu); in vmx_set_cr0()
3584 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3585 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) in vmx_set_cr0()
3586 enter_lmode(vcpu); in vmx_set_cr0()
3587 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) in vmx_set_cr0()
3588 exit_lmode(vcpu); in vmx_set_cr0()
3593 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); in vmx_set_cr0()
3595 if (!vcpu->fpu_active) in vmx_set_cr0()
3600 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3603 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3620 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in vmx_set_cr3() argument
3629 if (is_paging(vcpu) || is_guest_mode(vcpu)) in vmx_set_cr3()
3630 guest_cr3 = kvm_read_cr3(vcpu); in vmx_set_cr3()
3632 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; in vmx_set_cr3()
3633 ept_load_pdptrs(vcpu); in vmx_set_cr3()
3636 vmx_flush_tlb(vcpu); in vmx_set_cr3()
3640 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in vmx_set_cr4() argument
3650 (to_vmx(vcpu)->rmode.vm86_active ? in vmx_set_cr4()
3660 if (!nested_vmx_allowed(vcpu)) in vmx_set_cr4()
3663 if (to_vmx(vcpu)->nested.vmxon && in vmx_set_cr4()
3667 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
3669 if (!is_paging(vcpu)) { in vmx_set_cr4()
3677 if (!enable_unrestricted_guest && !is_paging(vcpu)) in vmx_set_cr4()
3692 static void vmx_get_segment(struct kvm_vcpu *vcpu, in vmx_get_segment() argument
3695 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3729 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) in vmx_get_segment_base() argument
3733 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3734 vmx_get_segment(vcpu, &s, seg); in vmx_get_segment_base()
3737 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3740 static int vmx_get_cpl(struct kvm_vcpu *vcpu) in vmx_get_cpl() argument
3742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3772 static void vmx_set_segment(struct kvm_vcpu *vcpu, in vmx_set_segment() argument
3775 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3810 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
3813 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in vmx_get_cs_db_l_bits() argument
3815 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
3821 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_get_idt() argument
3827 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_set_idt() argument
3833 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_get_gdt() argument
3839 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_set_gdt() argument
3845 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) in rmode_segment_valid() argument
3850 vmx_get_segment(vcpu, &var, seg); in rmode_segment_valid()
3866 static bool code_segment_valid(struct kvm_vcpu *vcpu) in code_segment_valid() argument
3871 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in code_segment_valid()
3894 static bool stack_segment_valid(struct kvm_vcpu *vcpu) in stack_segment_valid() argument
3899 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); in stack_segment_valid()
3916 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) in data_segment_valid() argument
3921 vmx_get_segment(vcpu, &var, seg); in data_segment_valid()
3941 static bool tr_valid(struct kvm_vcpu *vcpu) in tr_valid() argument
3945 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); in tr_valid()
3959 static bool ldtr_valid(struct kvm_vcpu *vcpu) in ldtr_valid() argument
3963 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); in ldtr_valid()
3977 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) in cs_ss_rpl_check() argument
3981 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in cs_ss_rpl_check()
3982 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); in cs_ss_rpl_check()
3993 static bool guest_state_valid(struct kvm_vcpu *vcpu) in guest_state_valid() argument
3999 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { in guest_state_valid()
4000 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) in guest_state_valid()
4002 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) in guest_state_valid()
4004 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) in guest_state_valid()
4006 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) in guest_state_valid()
4008 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) in guest_state_valid()
4010 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) in guest_state_valid()
4014 if (!cs_ss_rpl_check(vcpu)) in guest_state_valid()
4016 if (!code_segment_valid(vcpu)) in guest_state_valid()
4018 if (!stack_segment_valid(vcpu)) in guest_state_valid()
4020 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) in guest_state_valid()
4022 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) in guest_state_valid()
4024 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) in guest_state_valid()
4026 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) in guest_state_valid()
4028 if (!tr_valid(vcpu)) in guest_state_valid()
4030 if (!ldtr_valid(vcpu)) in guest_state_valid()
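The guest_state_valid() lines above split into two regimes: real or virtual-8086 mode, where only the relaxed rmode segment checks run for CS/SS/DS/ES/FS/GS, and protected mode, where CS/SS RPL consistency, the per-segment descriptor checks, TR, and LDTR are all verified. A compressed sketch of that control flow, with the individual predicates stubbed out since their bodies lie outside this match list:

#include <stdbool.h>
#include <stdio.h>

enum seg { SEG_CS, SEG_SS, SEG_DS, SEG_ES, SEG_FS, SEG_GS };

/* Stubs: the real predicates inspect VMCS segment fields; these only model
 * the dispatch structure of guest_state_valid(), not the per-field rules. */
static bool rmode_segment_valid(enum seg s) { (void)s; return true; }
static bool cs_ss_rpl_check(void)           { return true; }
static bool code_segment_valid(void)        { return true; }
static bool stack_segment_valid(void)       { return true; }
static bool data_segment_valid(enum seg s)  { (void)s; return true; }
static bool tr_valid(void)                  { return true; }
static bool ldtr_valid(void)                { return true; }

static bool guest_state_valid(bool protmode, bool eflags_vm)
{
	if (!protmode || eflags_vm) {
		/* real mode / vm86: only the relaxed real-mode segment checks */
		const enum seg segs[] = { SEG_CS, SEG_SS, SEG_DS, SEG_ES, SEG_FS, SEG_GS };
		for (unsigned i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
			if (!rmode_segment_valid(segs[i]))
				return false;
		return true;
	}
	/* protected mode: privilege-level and descriptor checks */
	return cs_ss_rpl_check() && code_segment_valid() && stack_segment_valid() &&
	       data_segment_valid(SEG_DS) && data_segment_valid(SEG_ES) &&
	       data_segment_valid(SEG_FS) && data_segment_valid(SEG_GS) &&
	       tr_valid() && ldtr_valid();
}

int main(void)
{
	printf("vm86 guest valid:      %d\n", guest_state_valid(true, true));
	printf("protected guest valid: %d\n", guest_state_valid(true, false));
	return 0;
}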
4366 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) in vmx_complete_nested_posted_interrupt() argument
4368 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt()
4403 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) in kvm_vcpu_trigger_posted_interrupt() argument
4406 if (vcpu->mode == IN_GUEST_MODE) { in kvm_vcpu_trigger_posted_interrupt()
4407 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), in kvm_vcpu_trigger_posted_interrupt()
4415 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, in vmx_deliver_nested_posted_interrupt() argument
4418 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4420 if (is_guest_mode(vcpu) && in vmx_deliver_nested_posted_interrupt()
4423 kvm_vcpu_trigger_posted_interrupt(vcpu); in vmx_deliver_nested_posted_interrupt()
4429 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_deliver_nested_posted_interrupt()
4441 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) in vmx_deliver_posted_interrupt() argument
4443 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4446 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); in vmx_deliver_posted_interrupt()
4454 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_deliver_posted_interrupt()
4455 if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) in vmx_deliver_posted_interrupt()
4456 kvm_vcpu_kick(vcpu); in vmx_deliver_posted_interrupt()
4459 static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) in vmx_sync_pir_to_irr() argument
4461 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
4466 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); in vmx_sync_pir_to_irr()
4469 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu) in vmx_sync_pir_to_irr_dummy() argument
4530 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4532 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4533 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4534 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4535 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4536 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4543 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_pin_based_exec_ctrl()
4552 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4555 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { in vmx_exec_control()
4572 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4586 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4647 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) { in vmx_vcpu_setup()
4696 vmx->vcpu.arch.pat = host_pat; in vmx_vcpu_setup()
4729 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) in vmx_vcpu_reset() argument
4731 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4738 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4739 kvm_set_cr8(&vmx->vcpu, 0); in vmx_vcpu_reset()
4741 if (kvm_vcpu_is_reset_bsp(&vmx->vcpu)) in vmx_vcpu_reset()
4744 kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); in vmx_vcpu_reset()
4773 kvm_rip_write(vcpu, 0xfff0); in vmx_vcpu_reset()
4794 if (vm_need_tpr_shadow(vmx->vcpu.kvm)) in vmx_vcpu_reset()
4796 __pa(vmx->vcpu.arch.apic->regs)); in vmx_vcpu_reset()
4800 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); in vmx_vcpu_reset()
4802 if (vmx_vm_has_apicv(vcpu->kvm)) in vmx_vcpu_reset()
4808 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; in vmx_vcpu_reset()
4809 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ in vmx_vcpu_reset()
4810 vmx_set_cr4(&vmx->vcpu, 0); in vmx_vcpu_reset()
4811 vmx_set_efer(&vmx->vcpu, 0); in vmx_vcpu_reset()
4812 vmx_fpu_activate(&vmx->vcpu); in vmx_vcpu_reset()
4813 update_exception_bitmap(&vmx->vcpu); in vmx_vcpu_reset()
4822 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) in nested_exit_on_intr() argument
4824 return get_vmcs12(vcpu)->pin_based_vm_exec_control & in nested_exit_on_intr()
4832 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) in nested_exit_intr_ack_set() argument
4834 return get_vmcs12(vcpu)->vm_exit_controls & in nested_exit_intr_ack_set()
4838 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) in nested_exit_on_nmi() argument
4840 return get_vmcs12(vcpu)->pin_based_vm_exec_control & in nested_exit_on_nmi()
4844 static void enable_irq_window(struct kvm_vcpu *vcpu) in enable_irq_window() argument
4853 static void enable_nmi_window(struct kvm_vcpu *vcpu) in enable_nmi_window() argument
4859 enable_irq_window(vcpu); in enable_nmi_window()
4868 static void vmx_inject_irq(struct kvm_vcpu *vcpu) in vmx_inject_irq() argument
4870 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
4872 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
4876 ++vcpu->stat.irq_injections; in vmx_inject_irq()
4879 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
4880 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
4881 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) in vmx_inject_irq()
4882 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_inject_irq()
4886 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
4889 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4895 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) in vmx_inject_nmi() argument
4897 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
4899 if (is_guest_mode(vcpu)) in vmx_inject_nmi()
4915 ++vcpu->stat.nmi_injections; in vmx_inject_nmi()
4918 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) in vmx_inject_nmi()
4919 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_inject_nmi()
4926 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) in vmx_get_nmi_mask() argument
4929 return to_vmx(vcpu)->soft_vnmi_blocked; in vmx_get_nmi_mask()
4930 if (to_vmx(vcpu)->nmi_known_unmasked) in vmx_get_nmi_mask()
4935 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) in vmx_set_nmi_mask() argument
4937 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
4955 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) in vmx_nmi_allowed() argument
4957 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
4960 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) in vmx_nmi_allowed()
4968 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) in vmx_interrupt_allowed() argument
4970 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
4993 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) in rmode_exception() argument
5001 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5003 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in rmode_exception()
5007 if (vcpu->guest_debug & in rmode_exception()
5025 static int handle_rmode_exception(struct kvm_vcpu *vcpu, in handle_rmode_exception() argument
5033 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { in handle_rmode_exception()
5034 if (vcpu->arch.halt_request) { in handle_rmode_exception()
5035 vcpu->arch.halt_request = 0; in handle_rmode_exception()
5036 return kvm_vcpu_halt(vcpu); in handle_rmode_exception()
5048 kvm_queue_exception(vcpu, vec); in handle_rmode_exception()
5071 static int handle_machine_check(struct kvm_vcpu *vcpu) in handle_machine_check() argument
5077 static int handle_exception(struct kvm_vcpu *vcpu) in handle_exception() argument
5079 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception()
5080 struct kvm_run *kvm_run = vcpu->run; in handle_exception()
5090 return handle_machine_check(vcpu); in handle_exception()
5096 vmx_fpu_activate(vcpu); in handle_exception()
5101 if (is_guest_mode(vcpu)) { in handle_exception()
5102 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exception()
5105 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); in handle_exception()
5107 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exception()
5122 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_exception()
5123 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; in handle_exception()
5124 vcpu->run->internal.ndata = 3; in handle_exception()
5125 vcpu->run->internal.data[0] = vect_info; in handle_exception()
5126 vcpu->run->internal.data[1] = intr_info; in handle_exception()
5127 vcpu->run->internal.data[2] = error_code; in handle_exception()
5137 if (kvm_event_needs_reinjection(vcpu)) in handle_exception()
5138 kvm_mmu_unprotect_page_virt(vcpu, cr2); in handle_exception()
5139 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); in handle_exception()
5144 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception()
5145 return handle_rmode_exception(vcpu, ex_no, error_code); in handle_exception()
5149 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); in handle_exception()
5153 if (!(vcpu->guest_debug & in handle_exception()
5155 vcpu->arch.dr6 &= ~15; in handle_exception()
5156 vcpu->arch.dr6 |= dr6 | DR6_RTM; in handle_exception()
5158 skip_emulated_instruction(vcpu); in handle_exception()
5160 kvm_queue_exception(vcpu, DB_VECTOR); in handle_exception()
5172 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5175 rip = kvm_rip_read(vcpu); in handle_exception()
5188 static int handle_external_interrupt(struct kvm_vcpu *vcpu) in handle_external_interrupt() argument
5190 ++vcpu->stat.irq_exits; in handle_external_interrupt()
5194 static int handle_triple_fault(struct kvm_vcpu *vcpu) in handle_triple_fault() argument
5196 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in handle_triple_fault()
5200 static int handle_io(struct kvm_vcpu *vcpu) in handle_io() argument
5210 ++vcpu->stat.io_exits; in handle_io()
5213 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_io()
5217 skip_emulated_instruction(vcpu); in handle_io()
5219 return kvm_fast_pio_out(vcpu, size, port); in handle_io()
5223 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) in vmx_patch_hypercall() argument
5233 static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) in nested_cr0_valid() argument
5236 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_cr0_valid()
5238 if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & in nested_cr0_valid()
5246 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) in handle_set_cr0() argument
5248 if (is_guest_mode(vcpu)) { in handle_set_cr0()
5249 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in handle_set_cr0()
5263 if (!nested_cr0_valid(vcpu, val)) in handle_set_cr0()
5266 if (kvm_set_cr0(vcpu, val)) in handle_set_cr0()
5271 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5274 return kvm_set_cr0(vcpu, val); in handle_set_cr0()
5278 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) in handle_set_cr4() argument
5280 if (is_guest_mode(vcpu)) { in handle_set_cr4()
5281 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in handle_set_cr4()
5287 if (kvm_set_cr4(vcpu, val)) in handle_set_cr4()
5292 return kvm_set_cr4(vcpu, val); in handle_set_cr4()
5296 static void handle_clts(struct kvm_vcpu *vcpu) in handle_clts() argument
5298 if (is_guest_mode(vcpu)) { in handle_clts()
5306 vcpu->arch.cr0 &= ~X86_CR0_TS; in handle_clts()
5308 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); in handle_clts()
5311 static int handle_cr(struct kvm_vcpu *vcpu) in handle_cr() argument
5323 val = kvm_register_readl(vcpu, reg); in handle_cr()
5327 err = handle_set_cr0(vcpu, val); in handle_cr()
5328 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5331 err = kvm_set_cr3(vcpu, val); in handle_cr()
5332 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5335 err = handle_set_cr4(vcpu, val); in handle_cr()
5336 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5339 u8 cr8_prev = kvm_get_cr8(vcpu); in handle_cr()
5341 err = kvm_set_cr8(vcpu, cr8); in handle_cr()
5342 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5343 if (irqchip_in_kernel(vcpu->kvm)) in handle_cr()
5347 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in handle_cr()
5353 handle_clts(vcpu); in handle_cr()
5354 trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); in handle_cr()
5355 skip_emulated_instruction(vcpu); in handle_cr()
5356 vmx_fpu_activate(vcpu); in handle_cr()
5361 val = kvm_read_cr3(vcpu); in handle_cr()
5362 kvm_register_write(vcpu, reg, val); in handle_cr()
5364 skip_emulated_instruction(vcpu); in handle_cr()
5367 val = kvm_get_cr8(vcpu); in handle_cr()
5368 kvm_register_write(vcpu, reg, val); in handle_cr()
5370 skip_emulated_instruction(vcpu); in handle_cr()
5376 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); in handle_cr()
5377 kvm_lmsw(vcpu, val); in handle_cr()
5379 skip_emulated_instruction(vcpu); in handle_cr()
5384 vcpu->run->exit_reason = 0; in handle_cr()
5385 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", in handle_cr()
5390 static int handle_dr(struct kvm_vcpu *vcpu) in handle_dr() argument
5399 if (!kvm_require_dr(vcpu, dr)) in handle_dr()
5403 if (!kvm_require_cpl(vcpu, 0)) in handle_dr()
5412 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in handle_dr()
5413 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; in handle_dr()
5414 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5415 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5416 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5417 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in handle_dr()
5420 vcpu->arch.dr6 &= ~15; in handle_dr()
5421 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; in handle_dr()
5422 kvm_queue_exception(vcpu, DB_VECTOR); in handle_dr()
5427 if (vcpu->guest_debug == 0) { in handle_dr()
5439 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5447 if (kvm_get_dr(vcpu, dr, &val)) in handle_dr()
5449 kvm_register_write(vcpu, reg, val); in handle_dr()
5451 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) in handle_dr()
5454 skip_emulated_instruction(vcpu); in handle_dr()
5458 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) in vmx_get_dr6() argument
5460 return vcpu->arch.dr6; in vmx_get_dr6()
5463 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) in vmx_set_dr6() argument
5467 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) in vmx_sync_dirty_debug_regs() argument
5471 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5472 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5473 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5474 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5475 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5476 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5478 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
5485 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) in vmx_set_dr7() argument
5490 static int handle_cpuid(struct kvm_vcpu *vcpu) in handle_cpuid() argument
5492 kvm_emulate_cpuid(vcpu); in handle_cpuid()
5496 static int handle_rdmsr(struct kvm_vcpu *vcpu) in handle_rdmsr() argument
5498 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_rdmsr()
5501 if (vmx_get_msr(vcpu, ecx, &data)) { in handle_rdmsr()
5503 kvm_inject_gp(vcpu, 0); in handle_rdmsr()
5510 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; in handle_rdmsr()
5511 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; in handle_rdmsr()
5512 skip_emulated_instruction(vcpu); in handle_rdmsr()
5516 static int handle_wrmsr(struct kvm_vcpu *vcpu) in handle_wrmsr() argument
5519 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_wrmsr()
5520 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) in handle_wrmsr()
5521 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); in handle_wrmsr()
5526 if (kvm_set_msr(vcpu, &msr) != 0) { in handle_wrmsr()
5528 kvm_inject_gp(vcpu, 0); in handle_wrmsr()
5533 skip_emulated_instruction(vcpu); in handle_wrmsr()
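The handle_rdmsr()/handle_wrmsr() lines above move a 64-bit MSR value between the EDX:EAX split form the instructions use and a single u64 (the "& -1u" masks and the 32-bit shift). A standalone sketch of that split/rejoin arithmetic:

#include <stdio.h>
#include <stdint.h>

/* RDMSR returns a 64-bit value split across EDX:EAX; WRMSR takes it the same way. */
static void split_msr(uint64_t data, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)(data & 0xffffffffu);	/* "data & -1u" in the handler */
	*edx = (uint32_t)(data >> 32);		/* "(data >> 32) & -1u" */
}

static uint64_t join_msr(uint32_t eax, uint32_t edx)
{
	return (uint64_t)eax | ((uint64_t)edx << 32);
}

int main(void)
{
	uint32_t eax, edx;

	split_msr(0x123456789abcdef0ull, &eax, &edx);
	printf("eax=%#x edx=%#x rejoined=%#llx\n", eax, edx,
	       (unsigned long long)join_msr(eax, edx));
	return 0;
}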
5537 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) in handle_tpr_below_threshold() argument
5539 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_tpr_below_threshold()
5543 static int handle_interrupt_window(struct kvm_vcpu *vcpu) in handle_interrupt_window() argument
5552 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_interrupt_window()
5554 ++vcpu->stat.irq_window_exits; in handle_interrupt_window()
5560 if (!irqchip_in_kernel(vcpu->kvm) && in handle_interrupt_window()
5561 vcpu->run->request_interrupt_window && in handle_interrupt_window()
5562 !kvm_cpu_has_interrupt(vcpu)) { in handle_interrupt_window()
5563 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in handle_interrupt_window()
5569 static int handle_halt(struct kvm_vcpu *vcpu) in handle_halt() argument
5571 return kvm_emulate_halt(vcpu); in handle_halt()
5574 static int handle_vmcall(struct kvm_vcpu *vcpu) in handle_vmcall() argument
5576 kvm_emulate_hypercall(vcpu); in handle_vmcall()
5580 static int handle_invd(struct kvm_vcpu *vcpu) in handle_invd() argument
5582 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_invd()
5585 static int handle_invlpg(struct kvm_vcpu *vcpu) in handle_invlpg() argument
5589 kvm_mmu_invlpg(vcpu, exit_qualification); in handle_invlpg()
5590 skip_emulated_instruction(vcpu); in handle_invlpg()
5594 static int handle_rdpmc(struct kvm_vcpu *vcpu) in handle_rdpmc() argument
5598 err = kvm_rdpmc(vcpu); in handle_rdpmc()
5599 kvm_complete_insn_gp(vcpu, err); in handle_rdpmc()
5604 static int handle_wbinvd(struct kvm_vcpu *vcpu) in handle_wbinvd() argument
5606 kvm_emulate_wbinvd(vcpu); in handle_wbinvd()
5610 static int handle_xsetbv(struct kvm_vcpu *vcpu) in handle_xsetbv() argument
5612 u64 new_bv = kvm_read_edx_eax(vcpu); in handle_xsetbv()
5613 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); in handle_xsetbv()
5615 if (kvm_set_xcr(vcpu, index, new_bv) == 0) in handle_xsetbv()
5616 skip_emulated_instruction(vcpu); in handle_xsetbv()
5620 static int handle_xsaves(struct kvm_vcpu *vcpu) in handle_xsaves() argument
5622 skip_emulated_instruction(vcpu); in handle_xsaves()
5627 static int handle_xrstors(struct kvm_vcpu *vcpu) in handle_xrstors() argument
5629 skip_emulated_instruction(vcpu); in handle_xrstors()
5634 static int handle_apic_access(struct kvm_vcpu *vcpu) in handle_apic_access() argument
5649 kvm_lapic_set_eoi(vcpu); in handle_apic_access()
5650 skip_emulated_instruction(vcpu); in handle_apic_access()
5654 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_apic_access()
5657 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) in handle_apic_eoi_induced() argument
5663 kvm_apic_set_eoi_accelerated(vcpu, vector); in handle_apic_eoi_induced()
5667 static int handle_apic_write(struct kvm_vcpu *vcpu) in handle_apic_write() argument
5673 kvm_apic_write_nodecode(vcpu, offset); in handle_apic_write()
5677 static int handle_task_switch(struct kvm_vcpu *vcpu) in handle_task_switch() argument
5679 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5696 vcpu->arch.nmi_injected = false; in handle_task_switch()
5697 vmx_set_nmi_mask(vcpu, true); in handle_task_switch()
5701 kvm_clear_interrupt_queue(vcpu); in handle_task_switch()
5712 kvm_clear_exception_queue(vcpu); in handle_task_switch()
5723 skip_emulated_instruction(vcpu); in handle_task_switch()
5725 if (kvm_task_switch(vcpu, tss_selector, in handle_task_switch()
5728 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_task_switch()
5729 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_task_switch()
5730 vcpu->run->internal.ndata = 0; in handle_task_switch()
5745 static int handle_ept_violation(struct kvm_vcpu *vcpu) in handle_ept_violation() argument
5762 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_ept_violation()
5763 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; in handle_ept_violation()
5773 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5788 vcpu->arch.exit_qualification = exit_qualification; in handle_ept_violation()
5790 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); in handle_ept_violation()
5817 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, in ept_misconfig_inspect_spte() argument
5856 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) in handle_ept_misconfig() argument
5863 if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { in handle_ept_misconfig()
5864 skip_emulated_instruction(vcpu); in handle_ept_misconfig()
5868 ret = handle_mmio_page_fault_common(vcpu, gpa, true); in handle_ept_misconfig()
5870 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == in handle_ept_misconfig()
5874 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); in handle_ept_misconfig()
5883 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); in handle_ept_misconfig()
5886 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); in handle_ept_misconfig()
5888 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_ept_misconfig()
5889 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; in handle_ept_misconfig()
5894 static int handle_nmi_window(struct kvm_vcpu *vcpu) in handle_nmi_window() argument
5902 ++vcpu->stat.nmi_window_exits; in handle_nmi_window()
5903 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_nmi_window()
5908 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) in handle_invalid_guest_state() argument
5910 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5921 if (intr_window_requested && vmx_interrupt_allowed(vcpu)) in handle_invalid_guest_state()
5922 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5924 if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) in handle_invalid_guest_state()
5927 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); in handle_invalid_guest_state()
5930 ++vcpu->stat.mmio_exits; in handle_invalid_guest_state()
5936 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_invalid_guest_state()
5937 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_invalid_guest_state()
5938 vcpu->run->internal.ndata = 0; in handle_invalid_guest_state()
5942 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
5943 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
5944 ret = kvm_vcpu_halt(vcpu); in handle_invalid_guest_state()
5986 static void grow_ple_window(struct kvm_vcpu *vcpu) in grow_ple_window() argument
5988 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
5996 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); in grow_ple_window()
5999 static void shrink_ple_window(struct kvm_vcpu *vcpu) in shrink_ple_window() argument
6001 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
6010 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); in shrink_ple_window()
6259 static int handle_pause(struct kvm_vcpu *vcpu) in handle_pause() argument
6262 grow_ple_window(vcpu); in handle_pause()
6264 skip_emulated_instruction(vcpu); in handle_pause()
6265 kvm_vcpu_on_spin(vcpu); in handle_pause()
6270 static int handle_nop(struct kvm_vcpu *vcpu) in handle_nop() argument
6272 skip_emulated_instruction(vcpu); in handle_nop()
6276 static int handle_mwait(struct kvm_vcpu *vcpu) in handle_mwait() argument
6279 return handle_nop(vcpu); in handle_mwait()
6282 static int handle_monitor(struct kvm_vcpu *vcpu) in handle_monitor() argument
6285 return handle_nop(vcpu); in handle_monitor()
6380 static void nested_vmx_succeed(struct kvm_vcpu *vcpu) in nested_vmx_succeed() argument
6382 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) in nested_vmx_succeed()
6387 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) in nested_vmx_failInvalid() argument
6389 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) in nested_vmx_failInvalid()
6395 static void nested_vmx_failValid(struct kvm_vcpu *vcpu, in nested_vmx_failValid() argument
6398 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
6403 nested_vmx_failInvalid(vcpu); in nested_vmx_failValid()
6406 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) in nested_vmx_failValid()
6410 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; in nested_vmx_failValid()
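The nested_vmx_succeed()/failInvalid()/failValid() lines above set the guest RFLAGS to signal the outcome of an emulated VMX instruction; the exact flag masks are on continuation lines not shown in this match list, so the sketch below follows the architectural VMsucceed/VMfailInvalid/VMfailValid convention (clear the six arithmetic flags, set CF, or set ZF and record the error number, respectively), which is what failValid's write to vm_instruction_error at line 6410 corresponds to:

#include <stdio.h>
#include <stdint.h>

/* x86 RFLAGS bits used by the VMX success/failure convention. */
#define X86_EFLAGS_CF 0x0001u
#define X86_EFLAGS_PF 0x0004u
#define X86_EFLAGS_AF 0x0010u
#define X86_EFLAGS_ZF 0x0040u
#define X86_EFLAGS_SF 0x0080u
#define X86_EFLAGS_OF 0x0800u

#define VMX_ARITH_FLAGS (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/* VMsucceed: all six arithmetic flags cleared. */
static uint64_t vmx_succeed(uint64_t rflags)
{
	return rflags & ~VMX_ARITH_FLAGS;
}

/* VMfailInvalid: CF=1, the rest cleared (no current VMCS to record an error in). */
static uint64_t vmx_fail_invalid(uint64_t rflags)
{
	return (rflags & ~VMX_ARITH_FLAGS) | X86_EFLAGS_CF;
}

/* VMfailValid: ZF=1, the rest cleared; the error number goes into the current
 * VMCS's VM-instruction error field (vmcs12 in the nested case above). */
static uint64_t vmx_fail_valid(uint64_t rflags, uint32_t err, uint32_t *vm_instruction_error)
{
	*vm_instruction_error = err;
	return (rflags & ~VMX_ARITH_FLAGS) | X86_EFLAGS_ZF;
}

int main(void)
{
	uint32_t err_field = 0;

	printf("succeed:     %#llx\n", (unsigned long long)vmx_succeed(0x2 | X86_EFLAGS_CF));
	printf("failInvalid: %#llx\n", (unsigned long long)vmx_fail_invalid(0x2));
	printf("failValid:   %#llx (error %u)\n",
	       (unsigned long long)vmx_fail_valid(0x2, 9, &err_field), err_field);
	return 0;
}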
6417 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) in nested_vmx_abort() argument
6420 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in nested_vmx_abort()
6430 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
6431 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
6442 static int get_vmx_mem_address(struct kvm_vcpu *vcpu, in get_vmx_mem_address() argument
6464 kvm_queue_exception(vcpu, UD_VECTOR); in get_vmx_mem_address()
6470 *ret = vmx_get_segment_base(vcpu, seg_reg); in get_vmx_mem_address()
6472 *ret += kvm_register_read(vcpu, base_reg); in get_vmx_mem_address()
6474 *ret += kvm_register_read(vcpu, index_reg)<<scaling; in get_vmx_mem_address()
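The get_vmx_mem_address() lines above decode the memory operand of a VMX instruction: the segment base, an optional base register, and an optional scaled index register are summed (the displacement taken from the exit qualification is folded in as well, though that line is not in this match list). A small sketch of the effective-address arithmetic with mock inputs:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Mock operand decode: the real code extracts these from the VMX instruction
 * information field and the exit qualification. */
struct mem_operand {
	uint64_t seg_base;	/* vmx_get_segment_base(vcpu, seg_reg) */
	bool has_base, has_index;
	uint64_t base_reg, index_reg;
	unsigned scaling;	/* index is shifted left by this amount */
	uint64_t displacement;	/* assumed to come from the exit qualification */
};

static uint64_t vmx_mem_address(const struct mem_operand *op)
{
	uint64_t addr = op->seg_base + op->displacement;

	if (op->has_base)
		addr += op->base_reg;			/* kvm_register_read(base_reg) */
	if (op->has_index)
		addr += op->index_reg << op->scaling;	/* kvm_register_read(index_reg) << scaling */
	return addr;
}

int main(void)
{
	struct mem_operand op = { .seg_base = 0x0, .has_base = true, .base_reg = 0x2000,
				  .has_index = true, .index_reg = 4, .scaling = 3,
				  .displacement = 0x10 };
	printf("effective address: %#llx\n", (unsigned long long)vmx_mem_address(&op));
	return 0;
}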
6496 static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, in nested_vmx_check_vmptr() argument
6503 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr()
6504 int maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_vmptr()
6506 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), in nested_vmx_check_vmptr()
6510 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, in nested_vmx_check_vmptr()
6512 kvm_inject_page_fault(vcpu, &e); in nested_vmx_check_vmptr()
6529 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmptr()
6530 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6534 page = nested_get_page(vcpu, vmptr); in nested_vmx_check_vmptr()
6537 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmptr()
6539 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6547 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6549 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6554 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6556 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6562 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6564 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6569 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6571 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6592 static int handle_vmon(struct kvm_vcpu *vcpu) in handle_vmon() argument
6595 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon()
6605 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || in handle_vmon()
6606 !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || in handle_vmon()
6607 (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { in handle_vmon()
6608 kvm_queue_exception(vcpu, UD_VECTOR); in handle_vmon()
6612 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in handle_vmon()
6613 if (is_long_mode(vcpu) && !cs.l) { in handle_vmon()
6614 kvm_queue_exception(vcpu, UD_VECTOR); in handle_vmon()
6618 if (vmx_get_cpl(vcpu)) { in handle_vmon()
6619 kvm_inject_gp(vcpu, 0); in handle_vmon()
6623 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) in handle_vmon()
6627 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); in handle_vmon()
6628 skip_emulated_instruction(vcpu); in handle_vmon()
6634 kvm_inject_gp(vcpu, 0); in handle_vmon()
6658 skip_emulated_instruction(vcpu); in handle_vmon()
6659 nested_vmx_succeed(vcpu); in handle_vmon()
6668 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) in nested_vmx_check_permission() argument
6671 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission()
6674 kvm_queue_exception(vcpu, UD_VECTOR); in nested_vmx_check_permission()
6678 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in nested_vmx_check_permission()
6679 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || in nested_vmx_check_permission()
6680 (is_long_mode(vcpu) && !cs.l)) { in nested_vmx_check_permission()
6681 kvm_queue_exception(vcpu, UD_VECTOR); in nested_vmx_check_permission()
6685 if (vmx_get_cpl(vcpu)) { in nested_vmx_check_permission()
6686 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
6753 static int handle_vmoff(struct kvm_vcpu *vcpu) in handle_vmoff() argument
6755 if (!nested_vmx_check_permission(vcpu)) in handle_vmoff()
6757 free_nested(to_vmx(vcpu)); in handle_vmoff()
6758 skip_emulated_instruction(vcpu); in handle_vmoff()
6759 nested_vmx_succeed(vcpu); in handle_vmoff()
6764 static int handle_vmclear(struct kvm_vcpu *vcpu) in handle_vmclear() argument
6766 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear()
6771 if (!nested_vmx_check_permission(vcpu)) in handle_vmclear()
6774 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) in handle_vmclear()
6780 page = nested_get_page(vcpu, vmptr); in handle_vmclear()
6789 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in handle_vmclear()
6799 skip_emulated_instruction(vcpu); in handle_vmclear()
6800 nested_vmx_succeed(vcpu); in handle_vmclear()
6804 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
6807 static int handle_vmlaunch(struct kvm_vcpu *vcpu) in handle_vmlaunch() argument
6809 return nested_vmx_run(vcpu, true); in handle_vmlaunch()
6813 static int handle_vmresume(struct kvm_vcpu *vcpu) in handle_vmresume() argument
6816 return nested_vmx_run(vcpu, false); in handle_vmresume()
6845 static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, in vmcs12_read_any() argument
6854 p = ((char *)(get_vmcs12(vcpu))) + offset; in vmcs12_read_any()
6876 static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, in vmcs12_write_any() argument
6879 char *p = ((char *) get_vmcs12(vcpu)) + offset; in vmcs12_write_any()
6935 vmcs12_write_any(&vmx->vcpu, field, field_value); in copy_shadow_to_vmcs12()
6964 vmcs12_read_any(&vmx->vcpu, field, &field_value); in copy_vmcs12_to_shadow()
6994 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) in nested_vmx_check_vmcs12() argument
6996 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12()
6998 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmcs12()
6999 skip_emulated_instruction(vcpu); in nested_vmx_check_vmcs12()
7005 static int handle_vmread(struct kvm_vcpu *vcpu) in handle_vmread() argument
7013 if (!nested_vmx_check_permission(vcpu) || in handle_vmread()
7014 !nested_vmx_check_vmcs12(vcpu)) in handle_vmread()
7018 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in handle_vmread()
7020 if (vmcs12_read_any(vcpu, field, &field_value) < 0) { in handle_vmread()
7021 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); in handle_vmread()
7022 skip_emulated_instruction(vcpu); in handle_vmread()
7031 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), in handle_vmread()
7034 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmread()
7038 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, in handle_vmread()
7039 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); in handle_vmread()
7042 nested_vmx_succeed(vcpu); in handle_vmread()
7043 skip_emulated_instruction(vcpu); in handle_vmread()
7048 static int handle_vmwrite(struct kvm_vcpu *vcpu) in handle_vmwrite() argument
7063 if (!nested_vmx_check_permission(vcpu) || in handle_vmwrite()
7064 !nested_vmx_check_vmcs12(vcpu)) in handle_vmwrite()
7068 field_value = kvm_register_readl(vcpu, in handle_vmwrite()
7071 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmwrite()
7074 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, in handle_vmwrite()
7075 &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { in handle_vmwrite()
7076 kvm_inject_page_fault(vcpu, &e); in handle_vmwrite()
7082 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in handle_vmwrite()
7084 nested_vmx_failValid(vcpu, in handle_vmwrite()
7086 skip_emulated_instruction(vcpu); in handle_vmwrite()
7090 if (vmcs12_write_any(vcpu, field, field_value) < 0) { in handle_vmwrite()
7091 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); in handle_vmwrite()
7092 skip_emulated_instruction(vcpu); in handle_vmwrite()
7096 nested_vmx_succeed(vcpu); in handle_vmwrite()
7097 skip_emulated_instruction(vcpu); in handle_vmwrite()
7102 static int handle_vmptrld(struct kvm_vcpu *vcpu) in handle_vmptrld() argument
7104 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld()
7108 if (!nested_vmx_check_permission(vcpu)) in handle_vmptrld()
7111 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) in handle_vmptrld()
7117 page = nested_get_page(vcpu, vmptr); in handle_vmptrld()
7119 nested_vmx_failInvalid(vcpu); in handle_vmptrld()
7120 skip_emulated_instruction(vcpu); in handle_vmptrld()
7127 nested_vmx_failValid(vcpu, in handle_vmptrld()
7129 skip_emulated_instruction(vcpu); in handle_vmptrld()
7147 nested_vmx_succeed(vcpu); in handle_vmptrld()
7148 skip_emulated_instruction(vcpu); in handle_vmptrld()
7153 static int handle_vmptrst(struct kvm_vcpu *vcpu) in handle_vmptrst() argument
7160 if (!nested_vmx_check_permission(vcpu)) in handle_vmptrst()
7163 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmptrst()
7167 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, in handle_vmptrst()
7168 (void *)&to_vmx(vcpu)->nested.current_vmptr, in handle_vmptrst()
7170 kvm_inject_page_fault(vcpu, &e); in handle_vmptrst()
7173 nested_vmx_succeed(vcpu); in handle_vmptrst()
7174 skip_emulated_instruction(vcpu); in handle_vmptrst()
7179 static int handle_invept(struct kvm_vcpu *vcpu) in handle_invept() argument
7181 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept()
7193 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invept()
7197 if (!nested_vmx_check_permission(vcpu)) in handle_invept()
7200 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { in handle_invept()
7201 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invept()
7206 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); in handle_invept()
7211 nested_vmx_failValid(vcpu, in handle_invept()
7213 skip_emulated_instruction(vcpu); in handle_invept()
7220 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), in handle_invept()
7223 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, in handle_invept()
7225 kvm_inject_page_fault(vcpu, &e); in handle_invept()
7231 kvm_mmu_sync_roots(vcpu); in handle_invept()
7232 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in handle_invept()
7233 nested_vmx_succeed(vcpu); in handle_invept()
7241 skip_emulated_instruction(vcpu); in handle_invept()
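
handle_invept() above takes the invalidation type from bits 28-31 of the instruction info and reads a 128-bit descriptor from the memory operand. A self-contained sketch of that descriptor and the type check; the struct and constants are illustrative, and the capability-bit offsets are this sketch's assumption about how IA32_VMX_EPT_VPID_CAP advertises the two architectural types:

    #include <stdint.h>

    /* INVEPT descriptor read from the memory operand (see handle_invept above). */
    struct invept_desc {
            uint64_t eptp;
            uint64_t reserved;      /* must be zero */
    };

    enum { INVEPT_SINGLE_CONTEXT = 1, INVEPT_GLOBAL = 2 };

    static int invept_type_supported(uint32_t vmx_instruction_info, uint64_t ept_caps)
    {
            uint32_t type = (vmx_instruction_info >> 28) & 0xf;

            /* Assumed mapping: type 1 -> caps bit 25, type 2 -> caps bit 26. */
            return (type == INVEPT_SINGLE_CONTEXT || type == INVEPT_GLOBAL) &&
                   (ept_caps & (1ull << (type + 24)));
    }

An unsupported type is what drives the nested_vmx_failValid() path at 7211 above.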
7245 static int handle_invvpid(struct kvm_vcpu *vcpu) in handle_invvpid() argument
7247 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invvpid()
7251 static int handle_pml_full(struct kvm_vcpu *vcpu) in handle_pml_full() argument
7255 trace_kvm_pml_full(vcpu->vcpu_id); in handle_pml_full()
7263 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
7281 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
7330 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_io() argument
7360 if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) in nested_vmx_exit_handled_io()
7379 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_msr() argument
7382 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; in nested_vmx_exit_handled_msr()
7404 if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) in nested_vmx_exit_handled_msr()
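
The kvm_read_guest() call at 7404 fetches one byte of L1's MSR bitmap; whether the corresponding bit is set decides if the RDMSR/WRMSR exit is reflected to L1. A stand-alone sketch of the indexing over the 4 KiB bitmap (read halves in the first 2 KiB, write halves in the second, low/high MSR ranges split at 0xc0000000; treat the exact offsets as this sketch's reading of the code, not a quote of it):

    #include <stdint.h>
    #include <stddef.h>

    /* Returns nonzero if the 4 KiB MSR bitmap requests an exit for this access. */
    static int msr_bitmap_wants_exit(const uint8_t bitmap[4096],
                                     uint32_t msr_index, int is_write)
    {
            size_t base = is_write ? 2048 : 0;          /* write bitmaps live in the upper 2 KiB */

            if (msr_index >= 0xc0000000) {              /* high MSR range */
                    msr_index -= 0xc0000000;
                    base += 1024;
            }
            if (msr_index > 0x1fff)                     /* outside both ranges: always exit */
                    return 1;

            return (bitmap[base + msr_index / 8] >> (msr_index & 7)) & 1;
    }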
7416 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_cr() argument
7422 unsigned long val = kvm_register_readl(vcpu, reg); in nested_vmx_exit_handled_cr()
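
nested_vmx_exit_handled_cr() reads the register named by the exit qualification before deciding whether L1 wanted to intercept the CR access. The qualification layout for control-register exits is architecturally fixed; a small decode sketch with names local to the sketch:

    #include <stdint.h>

    struct cr_exit_qual {
            unsigned int cr;          /* which control register (0, 3, 4, 8, ...)   */
            unsigned int access_type; /* 0=mov to CR, 1=mov from CR, 2=CLTS, 3=LMSW */
            unsigned int reg;         /* general-purpose register operand           */
            unsigned int lmsw_source; /* low 16 bits of the LMSW source operand     */
    };

    static struct cr_exit_qual decode_cr_exit(uint64_t exit_qualification)
    {
            struct cr_exit_qual q = {
                    .cr          = exit_qualification & 15,
                    .access_type = (exit_qualification >> 4) & 3,
                    .reg         = (exit_qualification >> 8) & 15,
                    .lmsw_source = (exit_qualification >> 16) & 0xffff,
            };
            return q;
    }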
7497 static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) in nested_vmx_exit_handled() argument
7500 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled()
7501 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_exit_handled()
7504 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, in nested_vmx_exit_handled()
7542 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) in nested_vmx_exit_handled()
7567 return nested_vmx_exit_handled_cr(vcpu, vmcs12); in nested_vmx_exit_handled()
7571 return nested_vmx_exit_handled_io(vcpu, vmcs12); in nested_vmx_exit_handled()
7574 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); in nested_vmx_exit_handled()
7629 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) in vmx_get_exit_info() argument
7671 struct kvm *kvm = vmx->vcpu.kvm; in vmx_flush_pml_buffer()
7707 struct kvm_vcpu *vcpu; in kvm_flush_pml_buffers() local
7714 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_flush_pml_buffers()
7715 kvm_vcpu_kick(vcpu); in kvm_flush_pml_buffers()
7722 static int vmx_handle_exit(struct kvm_vcpu *vcpu) in vmx_handle_exit() argument
7724 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
7740 return handle_invalid_guest_state(vcpu); in vmx_handle_exit()
7742 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { in vmx_handle_exit()
7743 nested_vmx_vmexit(vcpu, exit_reason, in vmx_handle_exit()
7750 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
7751 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
7757 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
7758 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
7774 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_handle_exit()
7775 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; in vmx_handle_exit()
7776 vcpu->run->internal.ndata = 2; in vmx_handle_exit()
7777 vcpu->run->internal.data[0] = vectoring_info; in vmx_handle_exit()
7778 vcpu->run->internal.data[1] = exit_reason; in vmx_handle_exit()
7783 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( in vmx_handle_exit()
7784 get_vmcs12(vcpu))))) { in vmx_handle_exit()
7785 if (vmx_interrupt_allowed(vcpu)) { in vmx_handle_exit()
7788 vcpu->arch.nmi_pending) { in vmx_handle_exit()
7797 __func__, vcpu->vcpu_id); in vmx_handle_exit()
7804 return kvm_vmx_exit_handlers[exit_reason](vcpu); in vmx_handle_exit()
7807 kvm_queue_exception(vcpu, UD_VECTOR); in vmx_handle_exit()
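
The kvm_vmx_exit_handlers[] table at 7281 and the indexed call at 7804 are a plain function-pointer dispatch keyed on the basic exit reason, with a bounds-and-NULL check that falls back to injecting #UD (7807). A self-contained miniature of the same pattern; the handlers and reason numbers here are made up for illustration:

    #include <stdio.h>

    struct vcpu;                                     /* opaque in this sketch */
    typedef int (*exit_handler_t)(struct vcpu *);

    static int handle_cpuid(struct vcpu *v) { (void)v; puts("cpuid"); return 1; }
    static int handle_halt(struct vcpu *v)  { (void)v; puts("halt");  return 1; }
    static int inject_ud(struct vcpu *v)    { (void)v; puts("#UD");   return 1; }

    static exit_handler_t const exit_handlers[] = {
            [10] = handle_cpuid,                     /* illustrative reason numbers */
            [12] = handle_halt,
    };
    #define NR_EXIT_HANDLERS (sizeof(exit_handlers) / sizeof(exit_handlers[0]))

    static int dispatch_exit(struct vcpu *v, unsigned int exit_reason)
    {
            if (exit_reason < NR_EXIT_HANDLERS && exit_handlers[exit_reason])
                    return exit_handlers[exit_reason](v);
            return inject_ud(v);                     /* unknown exit: fall back */
    }

The real function additionally reflects exits to L1 (7742-7743) and reports failed or inconsistent entries to userspace before ever reaching the table.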
7812 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) in update_cr8_intercept() argument
7814 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in update_cr8_intercept()
7816 if (is_guest_mode(vcpu) && in update_cr8_intercept()
7828 static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) in vmx_set_virtual_x2apic_mode() argument
7837 !vmx_vm_has_apicv(vcpu->kvm)) in vmx_set_virtual_x2apic_mode()
7840 if (!vm_need_tpr_shadow(vcpu->kvm)) in vmx_set_virtual_x2apic_mode()
7854 vmx_set_msr_bitmap(vcpu); in vmx_set_virtual_x2apic_mode()
7857 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) in vmx_set_apic_access_page_addr() argument
7859 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr()
7874 if (!is_guest_mode(vcpu) || in vmx_set_apic_access_page_addr()
7914 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) in vmx_hwapic_irr_update() argument
7916 if (!is_guest_mode(vcpu)) { in vmx_hwapic_irr_update()
7928 if (nested_exit_on_intr(vcpu)) in vmx_hwapic_irr_update()
7935 if (!kvm_event_needs_reinjection(vcpu) && in vmx_hwapic_irr_update()
7936 vmx_interrupt_allowed(vcpu)) { in vmx_hwapic_irr_update()
7937 kvm_queue_interrupt(vcpu, max_irr, false); in vmx_hwapic_irr_update()
7938 vmx_inject_irq(vcpu); in vmx_hwapic_irr_update()
7942 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) in vmx_load_eoi_exitmap() argument
7944 if (!vmx_vm_has_apicv(vcpu->kvm)) in vmx_load_eoi_exitmap()
7971 kvm_before_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
7973 kvm_after_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
7977 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) in vmx_handle_external_intr() argument
7991 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr()
8077 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, in __vmx_complete_interrupts() argument
8088 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
8089 kvm_clear_exception_queue(vcpu); in __vmx_complete_interrupts()
8090 kvm_clear_interrupt_queue(vcpu); in __vmx_complete_interrupts()
8095 kvm_make_request(KVM_REQ_EVENT, vcpu); in __vmx_complete_interrupts()
8102 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
8108 vmx_set_nmi_mask(vcpu, false); in __vmx_complete_interrupts()
8111 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8116 kvm_requeue_exception_e(vcpu, vector, err); in __vmx_complete_interrupts()
8118 kvm_requeue_exception(vcpu, vector); in __vmx_complete_interrupts()
8121 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8124 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); in __vmx_complete_interrupts()
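
__vmx_complete_interrupts() re-queues whatever event was in flight when the exit happened, based on the IDT-vectoring information field. A hedged sketch of that field's decode; bit positions follow the SDM, the type names are local to the sketch:

    #include <stdint.h>

    enum intr_type { EXT_INTR = 0, RSVD = 1, NMI = 2, HW_EXCEPTION = 3,
                     SOFT_INTR = 4, PRIV_SOFT_EXCEPTION = 5, SOFT_EXCEPTION = 6 };

    struct vectoring_info {
            int          valid;           /* bit 31: anything to re-inject at all? */
            unsigned int vector;          /* bits 0-7                              */
            enum intr_type type;          /* bits 8-10                             */
            int          has_error_code;  /* bit 11: error-code field is valid     */
    };

    static struct vectoring_info decode_idt_vectoring(uint32_t info)
    {
            struct vectoring_info v = {
                    .valid          = (info >> 31) & 1,
                    .vector         = info & 0xff,
                    .type           = (enum intr_type)((info >> 8) & 7),
                    .has_error_code = (info >> 11) & 1,
            };
            return v;
    }

NMIs and soft events get extra handling, as lines 8102-8124 show: NMIs re-arm the NMI mask, and soft interrupts/exceptions also save the instruction length so the injection can be replayed.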
8133 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
8138 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) in vmx_cancel_injection() argument
8140 __vmx_complete_interrupts(vcpu, in vmx_cancel_injection()
8166 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) in vmx_vcpu_run() argument
8168 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
8190 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8191 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
8192 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8193 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
8206 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in vmx_vcpu_run()
8207 vmx_set_interrupt_shadow(vcpu, 0); in vmx_vcpu_run()
8290 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), in vmx_vcpu_run()
8291 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), in vmx_vcpu_run()
8292 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), in vmx_vcpu_run()
8293 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), in vmx_vcpu_run()
8294 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), in vmx_vcpu_run()
8295 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), in vmx_vcpu_run()
8296 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), in vmx_vcpu_run()
8298 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), in vmx_vcpu_run()
8299 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), in vmx_vcpu_run()
8300 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), in vmx_vcpu_run()
8301 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), in vmx_vcpu_run()
8302 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), in vmx_vcpu_run()
8303 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), in vmx_vcpu_run()
8304 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), in vmx_vcpu_run()
8305 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), in vmx_vcpu_run()
8307 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), in vmx_vcpu_run()
8335 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) in vmx_vcpu_run()
8340 vcpu->arch.regs_dirty = 0; in vmx_vcpu_run()
8347 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); in vmx_vcpu_run()
8355 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_vcpu_run()
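
The regs_avail/regs_dirty manipulation around the guest run above is a lazy cache: before entry, GUEST_RSP/GUEST_RIP are written back only if marked dirty (8190-8193); after exit, only those two registers are marked stale because they live in the VMCS while the other GPRs were saved by the exit assembly (8335-8340). A small self-contained model of that bookkeeping; the register indices and the two VMCS accessor stubs are placeholders:

    #include <stdint.h>

    enum { REG_RSP = 0, REG_RIP = 1, NR_REGS = 16 };

    struct regcache {
            uint64_t regs[NR_REGS];
            uint32_t avail;              /* bit set: regs[] holds a valid copy     */
            uint32_t dirty;              /* bit set: regs[] is newer than the VMCS */
    };

    /* Stand-ins for vmcs_readl()/vmcs_writel() on GUEST_RSP / GUEST_RIP. */
    static uint64_t vmcs_read_stub(int reg)              { (void)reg; return 0; }
    static void     vmcs_write_stub(int reg, uint64_t v) { (void)reg; (void)v; }

    static uint64_t reg_read(struct regcache *c, int reg)
    {
            if (!(c->avail & (1u << reg))) {             /* fault it in on first use */
                    c->regs[reg] = vmcs_read_stub(reg);
                    c->avail |= 1u << reg;
            }
            return c->regs[reg];
    }

    /* Before VM entry: push back only the registers the host actually changed. */
    static void flush_dirty_before_entry(struct regcache *c)
    {
            if (c->dirty & (1u << REG_RSP))
                    vmcs_write_stub(REG_RSP, c->regs[REG_RSP]);
            if (c->dirty & (1u << REG_RIP))
                    vmcs_write_stub(REG_RIP, c->regs[REG_RIP]);
            c->dirty = 0;
    }

    /* After VM exit: RSP/RIP now live in the VMCS again; the other GPRs are cached. */
    static void invalidate_after_exit(struct regcache *c)
    {
            c->avail = ~((1u << REG_RSP) | (1u << REG_RIP));
            c->dirty = 0;
    }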
8364 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) in vmx_load_vmcs01() argument
8366 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01()
8374 vmx_vcpu_put(vcpu); in vmx_load_vmcs01()
8375 vmx_vcpu_load(vcpu, cpu); in vmx_load_vmcs01()
8376 vcpu->cpu = cpu; in vmx_load_vmcs01()
8380 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) in vmx_free_vcpu() argument
8382 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
8387 leave_guest_mode(vcpu); in vmx_free_vcpu()
8388 vmx_load_vmcs01(vcpu); in vmx_free_vcpu()
8392 kvm_vcpu_uninit(vcpu); in vmx_free_vcpu()
8407 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
8431 vmx_vcpu_load(&vmx->vcpu, cpu); in vmx_create_vcpu()
8432 vmx->vcpu.cpu = cpu; in vmx_create_vcpu()
8434 vmx_vcpu_put(&vmx->vcpu); in vmx_create_vcpu()
8472 return &vmx->vcpu; in vmx_create_vcpu()
8479 kvm_vcpu_uninit(&vmx->vcpu); in vmx_create_vcpu()
8505 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in vmx_get_mt_mask() argument
8522 else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) in vmx_get_mt_mask()
8523 ret = kvm_get_guest_memory_type(vcpu, gfn) << in vmx_get_mt_mask()
8541 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) in vmx_cpuid_update() argument
8544 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
8551 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); in vmx_cpuid_update()
8566 best = kvm_find_cpuid_entry(vcpu, 0x7, 0); in vmx_cpuid_update()
8569 guest_cpuid_has_pcid(vcpu)) { in vmx_cpuid_update()
8592 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, in nested_ept_inject_page_fault() argument
8595 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_ept_inject_page_fault()
8602 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); in nested_ept_inject_page_fault()
8608 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) in nested_ept_get_cr3() argument
8611 return get_vmcs12(vcpu)->ept_pointer; in nested_ept_get_cr3()
8614 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) in nested_ept_init_mmu_context() argument
8616 WARN_ON(mmu_is_nested(vcpu)); in nested_ept_init_mmu_context()
8617 kvm_init_shadow_ept_mmu(vcpu, in nested_ept_init_mmu_context()
8618 to_vmx(vcpu)->nested.nested_vmx_ept_caps & in nested_ept_init_mmu_context()
8620 vcpu->arch.mmu.set_cr3 = vmx_set_cr3; in nested_ept_init_mmu_context()
8621 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; in nested_ept_init_mmu_context()
8622 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
8624 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
8627 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) in nested_ept_uninit_mmu_context() argument
8629 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_ept_uninit_mmu_context()
8644 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, in vmx_inject_page_fault_nested() argument
8647 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in vmx_inject_page_fault_nested()
8649 WARN_ON(!is_guest_mode(vcpu)); in vmx_inject_page_fault_nested()
8652 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in vmx_inject_page_fault_nested()
8656 kvm_inject_page_fault(vcpu, fault); in vmx_inject_page_fault_nested()
8659 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, in nested_get_vmcs12_pages() argument
8662 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages()
8663 int maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_get_vmcs12_pages()
8679 nested_get_page(vcpu, vmcs12->apic_access_addr); in nested_get_vmcs12_pages()
8690 nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); in nested_get_vmcs12_pages()
8716 nested_get_page(vcpu, vmcs12->posted_intr_desc_addr); in nested_get_vmcs12_pages()
8735 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) in vmx_start_preemption_timer() argument
8737 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; in vmx_start_preemption_timer()
8738 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer()
8740 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
8752 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
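
vmx_start_preemption_timer() converts the timer value L1 programmed into nanoseconds for an hrtimer: the VMX-preemption timer counts TSC cycles divided by 2^rate, where the rate comes from IA32_VMX_MISC. The rate of 5 below is this sketch's assumption about what the emulation advertises; the caller must also guarantee a nonzero TSC frequency, as the check at 8740 does:

    #include <stdint.h>

    /* Assumed emulated preemption-timer rate: one tick every 2^5 TSC cycles. */
    #define PREEMPTION_TIMER_RATE 5

    /* vmcs12 timer value (ticks) -> nanoseconds, given the guest's TSC in kHz.
     * Assumes virtual_tsc_khz != 0 and values small enough not to overflow u64. */
    static uint64_t preemption_ticks_to_ns(uint64_t ticks, uint32_t virtual_tsc_khz)
    {
            /* ticks * 2^rate TSC cycles; cycles * 1e6 / tsc_khz = nanoseconds */
            return (ticks << PREEMPTION_TIMER_RATE) * 1000000ull / virtual_tsc_khz;
    }

    /* Inverse direction, used when handing the remaining time back to L1
     * (compare vmx_get_preemption_timer_value() further down this listing). */
    static uint64_t preemption_ns_to_ticks(uint64_t ns, uint32_t virtual_tsc_khz)
    {
            return (ns * virtual_tsc_khz / 1000000ull) >> PREEMPTION_TIMER_RATE;
    }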
8757 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_bitmap_controls() argument
8766 if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) { in nested_vmx_check_msr_bitmap_controls()
8770 maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_msr_bitmap_controls()
8783 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, in nested_vmx_merge_msr_bitmap() argument
8793 page = nested_get_page(vcpu, vmcs12->msr_bitmap); in nested_vmx_merge_msr_bitmap()
8862 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_apicv_controls() argument
8884 !nested_exit_on_intr(vcpu)) in nested_vmx_check_apicv_controls()
8894 !nested_exit_intr_ack_set(vcpu) || in nested_vmx_check_apicv_controls()
8905 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_switch() argument
8912 if (vmcs12_read_any(vcpu, count_field, &count) || in nested_vmx_check_msr_switch()
8913 vmcs12_read_any(vcpu, addr_field, &addr)) { in nested_vmx_check_msr_switch()
8919 maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_msr_switch()
8930 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_switch_controls() argument
8937 if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, in nested_vmx_check_msr_switch_controls()
8939 nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, in nested_vmx_check_msr_switch_controls()
8941 nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, in nested_vmx_check_msr_switch_controls()
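
nested_vmx_check_msr_switch() validates each MSR load/store area vmcs12 points at: a zero count is always fine, otherwise the base must be 16-byte aligned and the whole array of 16-byte entries must fit below the guest's physical-address width. A stand-alone sketch of that predicate, written as this sketch's reading of the lines above rather than the exact kernel check:

    #include <stdint.h>

    #define MSR_ENTRY_SIZE 16u     /* one {index, reserved, value} autoload entry */

    /* maxphyaddr comes from CPUID and is always well below 64 in practice. */
    static int msr_switch_area_valid(uint64_t addr, uint32_t count, int maxphyaddr)
    {
            uint64_t limit;

            if (maxphyaddr <= 0 || maxphyaddr >= 64)
                    return 0;                       /* defensive: nonsense width   */
            limit = (1ull << maxphyaddr) - 1;

            if (count == 0)
                    return 1;                       /* nothing to switch           */
            if (addr & (MSR_ENTRY_SIZE - 1))
                    return 0;                       /* base must be 16-byte aligned */
            if (addr > limit)
                    return 0;
            /* the last byte of the last entry must still be addressable */
            return (addr + (uint64_t)count * MSR_ENTRY_SIZE - 1) <= limit;
    }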
8947 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, in nested_vmx_msr_check_common() argument
8951 if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8) in nested_vmx_msr_check_common()
8961 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, in nested_vmx_load_msr_check() argument
8967 nested_vmx_msr_check_common(vcpu, e)) in nested_vmx_load_msr_check()
8972 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, in nested_vmx_store_msr_check() argument
8976 nested_vmx_msr_check_common(vcpu, e)) in nested_vmx_store_msr_check()
8985 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) in nested_vmx_load_msr() argument
8993 if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), in nested_vmx_load_msr()
9000 if (nested_vmx_load_msr_check(vcpu, &e)) { in nested_vmx_load_msr()
9008 if (kvm_set_msr(vcpu, &msr)) { in nested_vmx_load_msr()
9020 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) in nested_vmx_store_msr() argument
9026 if (kvm_read_guest(vcpu->kvm, in nested_vmx_store_msr()
9034 if (nested_vmx_store_msr_check(vcpu, &e)) { in nested_vmx_store_msr()
9040 if (kvm_get_msr(vcpu, e.index, &e.value)) { in nested_vmx_store_msr()
9046 if (kvm_write_guest(vcpu->kvm, in nested_vmx_store_msr()
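
nested_vmx_load_msr() walks a guest-physical array of MSR entries and applies each one, bailing out with the 1-based index of the first failure (that index is what ends up in the exit qualification of a failed entry). A compact model of the loop over the architectural 16-byte entry layout; the two helpers are stand-ins for the guest-memory and MSR accessors used above:

    #include <stdint.h>

    struct vmx_msr_entry {
            uint32_t index;
            uint32_t reserved;        /* must be zero */
            uint64_t value;
    };

    /* Stand-ins for kvm_read_guest() / kvm_set_msr() in the lines above. */
    static int read_guest_stub(uint64_t gpa, void *dst, unsigned int len)
    {
            (void)gpa; (void)dst; (void)len;
            return 0;                 /* pretend the guest-memory read succeeds */
    }

    static int set_msr_stub(uint32_t index, uint64_t value)
    {
            (void)index; (void)value;
            return 0;                 /* pretend the MSR write succeeds */
    }

    /* Returns 0 on success, or i+1 for the first entry that failed to load. */
    static uint32_t load_msr_area(uint64_t gpa, uint32_t count)
    {
            struct vmx_msr_entry e = {0};
            uint32_t i;

            for (i = 0; i < count; i++) {
                    if (read_guest_stub(gpa + i * sizeof(e), &e, sizeof(e)))
                            return i + 1;
                    if (e.reserved != 0)
                            return i + 1;             /* malformed entry */
                    if (set_msr_stub(e.index, e.value))
                            return i + 1;
            }
            return 0;
    }

The real code additionally filters disallowed indices through nested_vmx_load_msr_check(), as the lines around 9000 show.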
9068 static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in prepare_vmcs02() argument
9070 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02()
9111 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); in prepare_vmcs02()
9114 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
9126 vmx_set_rflags(vcpu, vmcs12->guest_rflags); in prepare_vmcs02()
9159 vmx_start_preemption_timer(vcpu); in prepare_vmcs02()
9213 (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) { in prepare_vmcs02()
9216 kvm_vcpu_reload_apic_access_page(vcpu); in prepare_vmcs02()
9267 nested_vmx_merge_msr_bitmap(vcpu, vmcs12); in prepare_vmcs02()
9285 update_exception_bitmap(vcpu); in prepare_vmcs02()
9286 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
9287 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
9305 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
9307 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9328 vmx_flush_tlb(vcpu); in prepare_vmcs02()
9332 kvm_mmu_unload(vcpu); in prepare_vmcs02()
9333 nested_ept_init_mmu_context(vcpu); in prepare_vmcs02()
9337 vcpu->arch.efer = vmcs12->guest_ia32_efer; in prepare_vmcs02()
9339 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in prepare_vmcs02()
9341 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in prepare_vmcs02()
9343 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
9353 vmx_set_cr0(vcpu, vmcs12->guest_cr0); in prepare_vmcs02()
9356 vmx_set_cr4(vcpu, vmcs12->guest_cr4); in prepare_vmcs02()
9360 kvm_set_cr3(vcpu, vmcs12->guest_cr3); in prepare_vmcs02()
9361 kvm_mmu_reset_context(vcpu); in prepare_vmcs02()
9364 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; in prepare_vmcs02()
9376 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); in prepare_vmcs02()
9377 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); in prepare_vmcs02()
9384 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) in nested_vmx_run() argument
9387 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run()
9393 if (!nested_vmx_check_permission(vcpu) || in nested_vmx_run()
9394 !nested_vmx_check_vmcs12(vcpu)) in nested_vmx_run()
9397 skip_emulated_instruction(vcpu); in nested_vmx_run()
9398 vmcs12 = get_vmcs12(vcpu); in nested_vmx_run()
9414 nested_vmx_failValid(vcpu, in nested_vmx_run()
9422 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9426 if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { in nested_vmx_run()
9427 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9431 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) { in nested_vmx_run()
9432 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9436 if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) { in nested_vmx_run()
9437 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9441 if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) { in nested_vmx_run()
9442 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9462 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9468 nested_vmx_failValid(vcpu, in nested_vmx_run()
9473 if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) || in nested_vmx_run()
9475 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9480 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9496 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || in nested_vmx_run()
9500 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9515 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || in nested_vmx_run()
9518 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9533 enter_guest_mode(vcpu); in nested_vmx_run()
9542 vmx_vcpu_put(vcpu); in nested_vmx_run()
9543 vmx_vcpu_load(vcpu, cpu); in nested_vmx_run()
9544 vcpu->cpu = cpu; in nested_vmx_run()
9549 prepare_vmcs02(vcpu, vmcs12); in nested_vmx_run()
9551 msr_entry_idx = nested_vmx_load_msr(vcpu, in nested_vmx_run()
9555 leave_guest_mode(vcpu); in nested_vmx_run()
9556 vmx_load_vmcs01(vcpu); in nested_vmx_run()
9557 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9565 return kvm_vcpu_halt(vcpu); in nested_vmx_run()
9596 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmcs12_guest_cr0() argument
9599 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
9602 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
9606 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmcs12_guest_cr4() argument
9609 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
9612 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
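
vmcs12_guest_cr0()/vmcs12_guest_cr4() rebuild the value L1 should see at exit time from three sources, matching the /*1*/../*3*/ comments in the listing: bits L2 was allowed to own come from the live hardware register, bits L1 trapped keep L1's last-written value, and the rest come from L0's read shadow. A self-contained restatement of that combine with plain parameters instead of VMCS reads:

    #include <stdint.h>

    /*
     * hw_cr0         - CR0 as read from the live VMCS (GUEST_CR0)
     * l1_guest_cr0   - vmcs12->guest_cr0, the last value L1 set up for L2
     * l1_mask        - vmcs12->cr0_guest_host_mask, bits L1 traps
     * l0_read_shadow - CR0_READ_SHADOW in vmcs02
     * l2_owned       - vcpu->arch.cr0_guest_owned_bits while L2 ran
     */
    static uint64_t vmcs12_effective_guest_cr0(uint64_t hw_cr0, uint64_t l1_guest_cr0,
                                               uint64_t l1_mask, uint64_t l0_read_shadow,
                                               uint64_t l2_owned)
    {
            return (hw_cr0         & l2_owned) |                 /* L2-owned bits   */
                   (l1_guest_cr0   & l1_mask)  |                 /* L1-trapped bits */
                   (l0_read_shadow & ~(l1_mask | l2_owned));     /* everything else */
    }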
9615 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, in vmcs12_save_pending_event() argument
9621 if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { in vmcs12_save_pending_event()
9622 nr = vcpu->arch.exception.nr; in vmcs12_save_pending_event()
9627 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
9632 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
9635 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
9639 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
9642 } else if (vcpu->arch.interrupt.pending) { in vmcs12_save_pending_event()
9643 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
9646 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
9649 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
9657 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) in vmx_check_nested_events() argument
9659 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events()
9661 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && in vmx_check_nested_events()
9665 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
9669 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { in vmx_check_nested_events()
9671 vcpu->arch.interrupt.pending) in vmx_check_nested_events()
9673 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, in vmx_check_nested_events()
9680 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
9681 vmx_set_nmi_mask(vcpu, true); in vmx_check_nested_events()
9685 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && in vmx_check_nested_events()
9686 nested_exit_on_intr(vcpu)) { in vmx_check_nested_events()
9689 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
9693 return vmx_complete_nested_posted_interrupt(vcpu); in vmx_check_nested_events()
9696 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) in vmx_get_preemption_timer_value() argument
9699 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
9705 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
9721 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, in prepare_vmcs12() argument
9726 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); in prepare_vmcs12()
9727 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); in prepare_vmcs12()
9729 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); in prepare_vmcs12()
9730 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); in prepare_vmcs12()
9774 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in prepare_vmcs12()
9783 vmx_get_preemption_timer_value(vcpu); in prepare_vmcs12()
9784 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in prepare_vmcs12()
9808 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); in prepare_vmcs12()
9811 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); in prepare_vmcs12()
9820 vmcs12->guest_ia32_efer = vcpu->arch.efer; in prepare_vmcs12()
9853 vmcs12_save_pending_event(vcpu, vmcs12); in prepare_vmcs12()
9860 vcpu->arch.nmi_injected = false; in prepare_vmcs12()
9861 kvm_clear_exception_queue(vcpu); in prepare_vmcs12()
9862 kvm_clear_interrupt_queue(vcpu); in prepare_vmcs12()
9874 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, in load_vmcs12_host_state() argument
9880 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
9882 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
9884 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
9885 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
9887 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); in load_vmcs12_host_state()
9888 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); in load_vmcs12_host_state()
9889 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); in load_vmcs12_host_state()
9896 vmx_set_cr0(vcpu, vmcs12->host_cr0); in load_vmcs12_host_state()
9902 update_exception_bitmap(vcpu); in load_vmcs12_host_state()
9903 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); in load_vmcs12_host_state()
9904 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in load_vmcs12_host_state()
9910 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
9911 kvm_set_cr4(vcpu, vmcs12->host_cr4); in load_vmcs12_host_state()
9913 nested_ept_uninit_mmu_context(vcpu); in load_vmcs12_host_state()
9915 kvm_set_cr3(vcpu, vmcs12->host_cr3); in load_vmcs12_host_state()
9916 kvm_mmu_reset_context(vcpu); in load_vmcs12_host_state()
9919 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; in load_vmcs12_host_state()
9927 vmx_flush_tlb(vcpu); in load_vmcs12_host_state()
9943 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
9964 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); in load_vmcs12_host_state()
9975 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); in load_vmcs12_host_state()
9977 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); in load_vmcs12_host_state()
9979 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); in load_vmcs12_host_state()
9982 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); in load_vmcs12_host_state()
9985 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); in load_vmcs12_host_state()
9993 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); in load_vmcs12_host_state()
9995 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
9999 vmx_set_msr_bitmap(vcpu); in load_vmcs12_host_state()
10001 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, in load_vmcs12_host_state()
10003 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); in load_vmcs12_host_state()
10011 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, in nested_vmx_vmexit() argument
10015 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit()
10016 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_vmexit()
10021 leave_guest_mode(vcpu); in nested_vmx_vmexit()
10022 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, in nested_vmx_vmexit()
10025 if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, in nested_vmx_vmexit()
10027 nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); in nested_vmx_vmexit()
10029 vmx_load_vmcs01(vcpu); in nested_vmx_vmexit()
10032 && nested_exit_intr_ack_set(vcpu)) { in nested_vmx_vmexit()
10033 int irq = kvm_cpu_get_interrupt(vcpu); in nested_vmx_vmexit()
10054 load_vmcs12_host_state(vcpu, vmcs12); in nested_vmx_vmexit()
10082 kvm_vcpu_reload_apic_access_page(vcpu); in nested_vmx_vmexit()
10091 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); in nested_vmx_vmexit()
10093 nested_vmx_succeed(vcpu); in nested_vmx_vmexit()
10098 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_vmx_vmexit()
10104 static void vmx_leave_nested(struct kvm_vcpu *vcpu) in vmx_leave_nested() argument
10106 if (is_guest_mode(vcpu)) in vmx_leave_nested()
10107 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
10108 free_nested(to_vmx(vcpu)); in vmx_leave_nested()
10118 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, in nested_vmx_entry_failure() argument
10122 load_vmcs12_host_state(vcpu, vmcs12); in nested_vmx_entry_failure()
10125 nested_vmx_succeed(vcpu); in nested_vmx_entry_failure()
10127 to_vmx(vcpu)->nested.sync_shadow_vmcs = true; in nested_vmx_entry_failure()
10130 static int vmx_check_intercept(struct kvm_vcpu *vcpu, in vmx_check_intercept() argument
10137 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) in vmx_sched_in() argument
10140 shrink_ple_window(vcpu); in vmx_sched_in()