Lines Matching refs:vcpu

521 	struct kvm_vcpu       vcpu;  member
611 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() argument
613 return container_of(vcpu, struct vcpu_vmx, vcpu); in to_vmx()
616 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) in vcpu_to_pi_desc() argument
618 return &(to_vmx(vcpu)->pi_desc); in vcpu_to_pi_desc()
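The two helpers above (lines 611-618) rely on struct kvm_vcpu being embedded as the vcpu member of struct vcpu_vmx (line 521): container_of() walks back from the member pointer to the enclosing wrapper. A minimal, self-contained sketch of that idiom, using toy struct names rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel's container_of(): subtract the member's
 * offset from the member pointer to recover the enclosing struct */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };                        /* stands in for struct kvm_vcpu */
struct outer {                                   /* stands in for struct vcpu_vmx */
        long pi_desc;
        struct inner vcpu;
};

static struct outer *to_outer(struct inner *vcpu)
{
        return container_of(vcpu, struct outer, vcpu);
}

int main(void)
{
        struct outer o = { .pi_desc = 42, .vcpu = { .id = 7 } };

        /* given only &o.vcpu, recover &o, exactly like to_vmx() */
        printf("%ld\n", to_outer(&o.vcpu)->pi_desc);   /* prints 42 */
        return 0;
}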
837 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) in get_vmcs12() argument
839 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
842 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) in nested_get_page() argument
844 struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT); in nested_get_page()
861 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
867 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
869 static void vmx_set_segment(struct kvm_vcpu *vcpu,
871 static void vmx_get_segment(struct kvm_vcpu *vcpu,
873 static bool guest_state_valid(struct kvm_vcpu *vcpu);
875 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
954 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1011 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) in cpu_need_tpr_shadow() argument
1013 return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); in cpu_need_tpr_shadow()
1128 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) in cpu_need_virtualize_apic_accesses() argument
1130 return flexpriority_enabled && lapic_in_kernel(vcpu); in cpu_need_virtualize_apic_accesses()
1255 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1258 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1594 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1595 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1639 static void update_exception_bitmap(struct kvm_vcpu *vcpu) in update_exception_bitmap() argument
1645 if ((vcpu->guest_debug & in update_exception_bitmap()
1649 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
1653 if (vcpu->fpu_active) in update_exception_bitmap()
1661 if (is_guest_mode(vcpu)) in update_exception_bitmap()
1662 eb |= get_vmcs12(vcpu)->exception_bitmap; in update_exception_bitmap()
1795 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1829 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1883 static void vmx_save_host_state(struct kvm_vcpu *vcpu) in vmx_save_host_state() argument
1885 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state()
1929 if (is_long_mode(&vmx->vcpu)) in vmx_save_host_state()
1945 ++vmx->vcpu.stat.host_state_reload; in __vmx_load_host_state()
1948 if (is_long_mode(&vmx->vcpu)) in __vmx_load_host_state()
1977 if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded) in __vmx_load_host_state()
1989 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) in vmx_vcpu_pi_load() argument
1991 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); in vmx_vcpu_pi_load()
1995 if (!kvm_arch_has_assigned_device(vcpu->kvm) || in vmx_vcpu_pi_load()
2017 if (vcpu->cpu != cpu) { in vmx_vcpu_pi_load()
2039 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in vmx_vcpu_load() argument
2041 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
2058 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in vmx_vcpu_load()
2089 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { in vmx_vcpu_load()
2090 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; in vmx_vcpu_load()
2094 vmx_vcpu_pi_load(vcpu, cpu); in vmx_vcpu_load()
2097 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) in vmx_vcpu_pi_put() argument
2099 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); in vmx_vcpu_pi_put()
2101 if (!kvm_arch_has_assigned_device(vcpu->kvm) || in vmx_vcpu_pi_put()
2106 if (vcpu->preempted) in vmx_vcpu_pi_put()
2110 static void vmx_vcpu_put(struct kvm_vcpu *vcpu) in vmx_vcpu_put() argument
2112 vmx_vcpu_pi_put(vcpu); in vmx_vcpu_put()
2114 __vmx_load_host_state(to_vmx(vcpu)); in vmx_vcpu_put()
2116 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); in vmx_vcpu_put()
2117 vcpu->cpu = -1; in vmx_vcpu_put()
2122 static void vmx_fpu_activate(struct kvm_vcpu *vcpu) in vmx_fpu_activate() argument
2126 if (vcpu->fpu_active) in vmx_fpu_activate()
2128 vcpu->fpu_active = 1; in vmx_fpu_activate()
2131 cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); in vmx_fpu_activate()
2133 update_exception_bitmap(vcpu); in vmx_fpu_activate()
2134 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; in vmx_fpu_activate()
2135 if (is_guest_mode(vcpu)) in vmx_fpu_activate()
2136 vcpu->arch.cr0_guest_owned_bits &= in vmx_fpu_activate()
2137 ~get_vmcs12(vcpu)->cr0_guest_host_mask; in vmx_fpu_activate()
2138 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_activate()
2141 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2159 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) in vmx_fpu_deactivate() argument
2164 vmx_decache_cr0_guest_bits(vcpu); in vmx_fpu_deactivate()
2166 update_exception_bitmap(vcpu); in vmx_fpu_deactivate()
2167 vcpu->arch.cr0_guest_owned_bits = 0; in vmx_fpu_deactivate()
2168 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_deactivate()
2169 if (is_guest_mode(vcpu)) { in vmx_fpu_deactivate()
2178 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in vmx_fpu_deactivate()
2180 (vcpu->arch.cr0 & X86_CR0_TS); in vmx_fpu_deactivate()
2183 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); in vmx_fpu_deactivate()
2186 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) in vmx_get_rflags() argument
2190 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { in vmx_get_rflags()
2191 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_get_rflags()
2193 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
2195 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
2198 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
2200 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
2203 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in vmx_set_rflags() argument
2205 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_set_rflags()
2206 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
2207 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
2208 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
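vmx_get_rflags()/vmx_set_rflags() above show the lazy-caching pattern used for guest register state: a bit in regs_avail marks the cached copy valid, the VMCS read happens only on the first access after invalidation, and writes refresh the cache before pushing the value to hardware. A rough sketch of the idea with invented names (the fake read/write helpers stand in for vmcs_readl()/vmcs_writel()):

#include <stdint.h>

enum { REG_RFLAGS = 0 };                   /* hypothetical register id */

struct reg_cache {
        uint32_t      avail;               /* bitmask of valid cached values */
        unsigned long rflags;              /* cached copy                    */
};

static unsigned long hw_read_rflags(void)             { return 0x2; } /* fake vmcs_readl()  */
static void          hw_write_rflags(unsigned long v) { (void)v;    } /* fake vmcs_writel() */

static unsigned long get_rflags(struct reg_cache *c)
{
        if (!(c->avail & (1u << REG_RFLAGS))) {        /* cache miss: fill once */
                c->avail |= 1u << REG_RFLAGS;
                c->rflags = hw_read_rflags();
        }
        return c->rflags;                              /* later calls are cheap */
}

static void set_rflags(struct reg_cache *c, unsigned long v)
{
        c->avail |= 1u << REG_RFLAGS;                  /* cache is now valid    */
        c->rflags = v;
        hw_write_rflags(v);
}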
2214 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) in vmx_get_interrupt_shadow() argument
2227 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) in vmx_set_interrupt_shadow() argument
2243 static void skip_emulated_instruction(struct kvm_vcpu *vcpu) in skip_emulated_instruction() argument
2247 rip = kvm_rip_read(vcpu); in skip_emulated_instruction()
2249 kvm_rip_write(vcpu, rip); in skip_emulated_instruction()
2252 vmx_set_interrupt_shadow(vcpu, 0); in skip_emulated_instruction()
2259 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) in nested_vmx_check_exception() argument
2261 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_check_exception()
2266 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in nested_vmx_check_exception()
2272 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, in vmx_queue_exception() argument
2276 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
2279 if (!reinject && is_guest_mode(vcpu) && in vmx_queue_exception()
2280 nested_vmx_check_exception(vcpu, nr)) in vmx_queue_exception()
2291 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_queue_exception()
2292 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) in vmx_queue_exception()
2293 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_queue_exception()
2299 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2329 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) in vmx_set_msr_bitmap() argument
2333 if (is_guest_mode(vcpu)) in vmx_set_msr_bitmap()
2335 else if (vcpu->arch.apic_base & X2APIC_ENABLE) { in vmx_set_msr_bitmap()
2336 if (is_long_mode(vcpu)) in vmx_set_msr_bitmap()
2341 if (is_long_mode(vcpu)) in vmx_set_msr_bitmap()
2361 if (is_long_mode(&vmx->vcpu)) { in setup_msrs()
2372 if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu)) in setup_msrs()
2379 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2390 vmx_set_msr_bitmap(&vmx->vcpu); in setup_msrs()
2398 static u64 guest_read_tsc(struct kvm_vcpu *vcpu) in guest_read_tsc() argument
2404 return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; in guest_read_tsc()
2411 static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in vmx_read_l1_tsc() argument
2415 tsc_offset = is_guest_mode(vcpu) ? in vmx_read_l1_tsc()
2416 to_vmx(vcpu)->nested.vmcs01_tsc_offset : in vmx_read_l1_tsc()
2421 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu) in vmx_read_tsc_offset() argument
2429 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in vmx_write_tsc_offset() argument
2431 if (is_guest_mode(vcpu)) { in vmx_write_tsc_offset()
2439 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; in vmx_write_tsc_offset()
2441 vmcs12 = get_vmcs12(vcpu); in vmx_write_tsc_offset()
2446 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in vmx_write_tsc_offset()
2452 static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) in vmx_adjust_tsc_offset_guest() argument
2457 if (is_guest_mode(vcpu)) { in vmx_adjust_tsc_offset_guest()
2459 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; in vmx_adjust_tsc_offset_guest()
2461 trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, in vmx_adjust_tsc_offset_guest()
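guest_read_tsc() and vmx_read_l1_tsc() above compose the guest-visible TSC as a scaled host TSC plus an offset, and while L2 runs the L1 view uses the saved vmcs01_tsc_offset rather than the offset currently loaded in hardware. A numeric sketch of that composition (all values made up; the 48 fractional bits are an assumption about the scaling format, not taken from the listing):

#include <stdint.h>
#include <stdio.h>

/* guest_tsc = ((host_tsc * ratio) >> frac_bits) + offset, as in guest_read_tsc() */
static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio, unsigned frac_bits)
{
        return (uint64_t)(((__uint128_t)host_tsc * ratio) >> frac_bits);
}

int main(void)
{
        uint64_t host_tsc = 1000000;
        uint64_t ratio    = 3ull << 47;          /* 1.5x with 48 fractional bits */
        int64_t  l1_off   = -200000;             /* vmcs01_tsc_offset            */
        int64_t  l2_extra = 5000;                /* extra offset L1 gives L2     */

        uint64_t l1_tsc = scale_tsc(host_tsc, ratio, 48) + l1_off;   /* L1 view  */
        uint64_t l2_tsc = l1_tsc + l2_extra;     /* hw offset = l1_off + l2_extra */

        printf("L1 sees %llu, L2 sees %llu\n",
               (unsigned long long)l1_tsc, (unsigned long long)l2_tsc);
        return 0;
}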
2465 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) in guest_cpuid_has_vmx() argument
2467 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); in guest_cpuid_has_vmx()
2477 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) in nested_vmx_allowed() argument
2479 return nested && guest_cpuid_has_vmx(vcpu); in nested_vmx_allowed()
2522 if (vmx_cpu_uses_apicv(&vmx->vcpu)) in nested_vmx_setup_ctls_msrs()
2682 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) in vmx_get_vmx_msr() argument
2684 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr()
2783 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in vmx_get_msr() argument
2796 vmx_load_host_state(to_vmx(vcpu)); in vmx_get_msr()
2797 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base; in vmx_get_msr()
2801 return kvm_get_msr_common(vcpu, msr_info); in vmx_get_msr()
2803 msr_info->data = guest_read_tsc(vcpu); in vmx_get_msr()
2820 if (!nested_vmx_allowed(vcpu)) in vmx_get_msr()
2822 msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control; in vmx_get_msr()
2825 if (!nested_vmx_allowed(vcpu)) in vmx_get_msr()
2827 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data); in vmx_get_msr()
2831 msr_info->data = vcpu->arch.ia32_xss; in vmx_get_msr()
2834 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) in vmx_get_msr()
2838 msr = find_msr_entry(to_vmx(vcpu), msr_info->index); in vmx_get_msr()
2843 return kvm_get_msr_common(vcpu, msr_info); in vmx_get_msr()
2849 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
2856 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in vmx_set_msr() argument
2858 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2866 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2897 kvm_write_tsc(vcpu, msr_info); in vmx_set_msr()
2901 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) in vmx_set_msr()
2904 vcpu->arch.pat = data; in vmx_set_msr()
2907 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2910 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2913 if (!nested_vmx_allowed(vcpu) || in vmx_set_msr()
2914 (to_vmx(vcpu)->nested.msr_ia32_feature_control & in vmx_set_msr()
2919 vmx_leave_nested(vcpu); in vmx_set_msr()
2932 vcpu->arch.ia32_xss = data; in vmx_set_msr()
2933 if (vcpu->arch.ia32_xss != host_xss) in vmx_set_msr()
2935 vcpu->arch.ia32_xss, host_xss); in vmx_set_msr()
2940 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) in vmx_set_msr()
2961 ret = kvm_set_msr_common(vcpu, msr_info); in vmx_set_msr()
2967 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) in vmx_cache_reg() argument
2969 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in vmx_cache_reg()
2972 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2975 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
2979 ept_save_pdptrs(vcpu); in vmx_cache_reg()
3409 static bool emulation_required(struct kvm_vcpu *vcpu) in emulation_required() argument
3411 return emulate_invalid_guest_state && !guest_state_valid(vcpu); in emulation_required()
3414 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, in fix_pmode_seg() argument
3430 vmx_set_segment(vcpu, save, seg); in fix_pmode_seg()
3433 static void enter_pmode(struct kvm_vcpu *vcpu) in enter_pmode() argument
3436 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
3442 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3443 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3444 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3445 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3446 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3447 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3453 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3463 update_exception_bitmap(vcpu); in enter_pmode()
3465 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3466 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
3467 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
3468 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
3469 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
3470 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
3506 static void enter_rmode(struct kvm_vcpu *vcpu) in enter_rmode() argument
3509 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
3511 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
3512 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
3513 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
3514 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
3515 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
3516 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
3517 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
3525 if (!vcpu->kvm->arch.tss_addr) in enter_rmode()
3531 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); in enter_rmode()
3542 update_exception_bitmap(vcpu); in enter_rmode()
3551 kvm_mmu_reset_context(vcpu); in enter_rmode()
3554 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) in vmx_set_efer() argument
3556 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
3566 vmx_load_host_state(to_vmx(vcpu)); in vmx_set_efer()
3567 vcpu->arch.efer = efer; in vmx_set_efer()
3569 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3572 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3581 static void enter_lmode(struct kvm_vcpu *vcpu) in enter_lmode() argument
3585 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
3595 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
3598 static void exit_lmode(struct kvm_vcpu *vcpu) in exit_lmode() argument
3600 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
3601 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
3606 static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) in __vmx_flush_tlb() argument
3610 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __vmx_flush_tlb()
3612 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); in __vmx_flush_tlb()
3616 static void vmx_flush_tlb(struct kvm_vcpu *vcpu) in vmx_flush_tlb() argument
3618 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); in vmx_flush_tlb()
3621 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) in vmx_decache_cr0_guest_bits() argument
3623 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3625 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3626 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3629 static void vmx_decache_cr3(struct kvm_vcpu *vcpu) in vmx_decache_cr3() argument
3631 if (enable_ept && is_paging(vcpu)) in vmx_decache_cr3()
3632 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_decache_cr3()
3633 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in vmx_decache_cr3()
3636 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) in vmx_decache_cr4_guest_bits() argument
3638 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3640 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3641 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3644 static void ept_load_pdptrs(struct kvm_vcpu *vcpu) in ept_load_pdptrs() argument
3646 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_load_pdptrs()
3649 (unsigned long *)&vcpu->arch.regs_dirty)) in ept_load_pdptrs()
3652 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { in ept_load_pdptrs()
3660 static void ept_save_pdptrs(struct kvm_vcpu *vcpu) in ept_save_pdptrs() argument
3662 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3664 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { in ept_save_pdptrs()
3672 (unsigned long *)&vcpu->arch.regs_avail); in ept_save_pdptrs()
3674 (unsigned long *)&vcpu->arch.regs_dirty); in ept_save_pdptrs()
3677 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3681 struct kvm_vcpu *vcpu) in ept_update_paging_mode_cr0() argument
3683 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) in ept_update_paging_mode_cr0()
3684 vmx_decache_cr3(vcpu); in ept_update_paging_mode_cr0()
3691 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3692 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); in ept_update_paging_mode_cr0()
3693 } else if (!is_paging(vcpu)) { in ept_update_paging_mode_cr0()
3699 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3700 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); in ept_update_paging_mode_cr0()
3707 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in vmx_set_cr0() argument
3709 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3719 enter_pmode(vcpu); in vmx_set_cr0()
3722 enter_rmode(vcpu); in vmx_set_cr0()
3726 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3727 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) in vmx_set_cr0()
3728 enter_lmode(vcpu); in vmx_set_cr0()
3729 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) in vmx_set_cr0()
3730 exit_lmode(vcpu); in vmx_set_cr0()
3735 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); in vmx_set_cr0()
3737 if (!vcpu->fpu_active) in vmx_set_cr0()
3742 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3745 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3762 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in vmx_set_cr3() argument
3771 if (is_paging(vcpu) || is_guest_mode(vcpu)) in vmx_set_cr3()
3772 guest_cr3 = kvm_read_cr3(vcpu); in vmx_set_cr3()
3774 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; in vmx_set_cr3()
3775 ept_load_pdptrs(vcpu); in vmx_set_cr3()
3778 vmx_flush_tlb(vcpu); in vmx_set_cr3()
3782 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in vmx_set_cr4() argument
3792 (to_vmx(vcpu)->rmode.vm86_active ? in vmx_set_cr4()
3802 if (!nested_vmx_allowed(vcpu)) in vmx_set_cr4()
3805 if (to_vmx(vcpu)->nested.vmxon && in vmx_set_cr4()
3809 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
3811 if (!is_paging(vcpu)) { in vmx_set_cr4()
3819 if (!enable_unrestricted_guest && !is_paging(vcpu)) in vmx_set_cr4()
3834 static void vmx_get_segment(struct kvm_vcpu *vcpu, in vmx_get_segment() argument
3837 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3871 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) in vmx_get_segment_base() argument
3875 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3876 vmx_get_segment(vcpu, &s, seg); in vmx_get_segment_base()
3879 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3882 static int vmx_get_cpl(struct kvm_vcpu *vcpu) in vmx_get_cpl() argument
3884 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3914 static void vmx_set_segment(struct kvm_vcpu *vcpu, in vmx_set_segment() argument
3917 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3952 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
3955 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in vmx_get_cs_db_l_bits() argument
3957 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
3963 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_get_idt() argument
3969 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_set_idt() argument
3975 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_get_gdt() argument
3981 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in vmx_set_gdt() argument
3987 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) in rmode_segment_valid() argument
3992 vmx_get_segment(vcpu, &var, seg); in rmode_segment_valid()
4008 static bool code_segment_valid(struct kvm_vcpu *vcpu) in code_segment_valid() argument
4013 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in code_segment_valid()
4036 static bool stack_segment_valid(struct kvm_vcpu *vcpu) in stack_segment_valid() argument
4041 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); in stack_segment_valid()
4058 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) in data_segment_valid() argument
4063 vmx_get_segment(vcpu, &var, seg); in data_segment_valid()
4083 static bool tr_valid(struct kvm_vcpu *vcpu) in tr_valid() argument
4087 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); in tr_valid()
4101 static bool ldtr_valid(struct kvm_vcpu *vcpu) in ldtr_valid() argument
4105 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); in ldtr_valid()
4119 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) in cs_ss_rpl_check() argument
4123 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in cs_ss_rpl_check()
4124 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); in cs_ss_rpl_check()
4135 static bool guest_state_valid(struct kvm_vcpu *vcpu) in guest_state_valid() argument
4141 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { in guest_state_valid()
4142 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) in guest_state_valid()
4144 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) in guest_state_valid()
4146 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) in guest_state_valid()
4148 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) in guest_state_valid()
4150 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) in guest_state_valid()
4152 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) in guest_state_valid()
4156 if (!cs_ss_rpl_check(vcpu)) in guest_state_valid()
4158 if (!code_segment_valid(vcpu)) in guest_state_valid()
4160 if (!stack_segment_valid(vcpu)) in guest_state_valid()
4162 if (!data_segment_valid(vcpu, VCPU_SREG_DS)) in guest_state_valid()
4164 if (!data_segment_valid(vcpu, VCPU_SREG_ES)) in guest_state_valid()
4166 if (!data_segment_valid(vcpu, VCPU_SREG_FS)) in guest_state_valid()
4168 if (!data_segment_valid(vcpu, VCPU_SREG_GS)) in guest_state_valid()
4170 if (!tr_valid(vcpu)) in guest_state_valid()
4172 if (!ldtr_valid(vcpu)) in guest_state_valid()
4493 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu) in vmx_cpu_uses_apicv() argument
4495 return enable_apicv && lapic_in_kernel(vcpu); in vmx_cpu_uses_apicv()
4498 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) in vmx_complete_nested_posted_interrupt() argument
4500 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt()
4535 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) in kvm_vcpu_trigger_posted_interrupt() argument
4538 if (vcpu->mode == IN_GUEST_MODE) { in kvm_vcpu_trigger_posted_interrupt()
4539 struct vcpu_vmx *vmx = to_vmx(vcpu); in kvm_vcpu_trigger_posted_interrupt()
4555 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), in kvm_vcpu_trigger_posted_interrupt()
4563 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, in vmx_deliver_nested_posted_interrupt() argument
4566 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4568 if (is_guest_mode(vcpu) && in vmx_deliver_nested_posted_interrupt()
4571 kvm_vcpu_trigger_posted_interrupt(vcpu); in vmx_deliver_nested_posted_interrupt()
4577 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_deliver_nested_posted_interrupt()
4589 static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) in vmx_deliver_posted_interrupt() argument
4591 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4594 r = vmx_deliver_nested_posted_interrupt(vcpu, vector); in vmx_deliver_posted_interrupt()
4602 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_deliver_posted_interrupt()
4603 if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) in vmx_deliver_posted_interrupt()
4604 kvm_vcpu_kick(vcpu); in vmx_deliver_posted_interrupt()
4607 static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) in vmx_sync_pir_to_irr() argument
4609 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
4614 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); in vmx_sync_pir_to_irr()
4617 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu) in vmx_sync_pir_to_irr_dummy() argument
4678 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4680 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4681 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4682 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4683 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4684 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4691 if (!vmx_cpu_uses_apicv(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
4700 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4703 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { in vmx_exec_control()
4720 if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu)) in vmx_secondary_exec_control()
4734 if (!vmx_cpu_uses_apicv(&vmx->vcpu)) in vmx_secondary_exec_control()
4798 if (vmx_cpu_uses_apicv(&vmx->vcpu)) { in vmx_vcpu_setup()
4840 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in vmx_vcpu_setup()
4872 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in vmx_vcpu_reset() argument
4874 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4882 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4883 kvm_set_cr8(vcpu, 0); in vmx_vcpu_reset()
4888 if (kvm_vcpu_is_reset_bsp(vcpu)) in vmx_vcpu_reset()
4891 kvm_set_apic_base(vcpu, &apic_base_msr); in vmx_vcpu_reset()
4924 kvm_rip_write(vcpu, 0xfff0); in vmx_vcpu_reset()
4942 if (cpu_need_tpr_shadow(vcpu)) in vmx_vcpu_reset()
4944 __pa(vcpu->arch.apic->regs)); in vmx_vcpu_reset()
4948 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); in vmx_vcpu_reset()
4950 if (vmx_cpu_uses_apicv(vcpu)) in vmx_vcpu_reset()
4957 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
4958 vmx_set_cr0(vcpu, cr0); /* enter rmode */ in vmx_vcpu_reset()
4959 vmx_set_cr4(vcpu, 0); in vmx_vcpu_reset()
4960 vmx_set_efer(vcpu, 0); in vmx_vcpu_reset()
4961 vmx_fpu_activate(vcpu); in vmx_vcpu_reset()
4962 update_exception_bitmap(vcpu); in vmx_vcpu_reset()
4971 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) in nested_exit_on_intr() argument
4973 return get_vmcs12(vcpu)->pin_based_vm_exec_control & in nested_exit_on_intr()
4981 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) in nested_exit_intr_ack_set() argument
4983 return get_vmcs12(vcpu)->vm_exit_controls & in nested_exit_intr_ack_set()
4987 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) in nested_exit_on_nmi() argument
4989 return get_vmcs12(vcpu)->pin_based_vm_exec_control & in nested_exit_on_nmi()
4993 static void enable_irq_window(struct kvm_vcpu *vcpu) in enable_irq_window() argument
5002 static void enable_nmi_window(struct kvm_vcpu *vcpu) in enable_nmi_window() argument
5008 enable_irq_window(vcpu); in enable_nmi_window()
5017 static void vmx_inject_irq(struct kvm_vcpu *vcpu) in vmx_inject_irq() argument
5019 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
5021 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
5025 ++vcpu->stat.irq_injections; in vmx_inject_irq()
5028 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
5029 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
5030 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) in vmx_inject_irq()
5031 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_inject_irq()
5035 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
5038 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
5044 static void vmx_inject_nmi(struct kvm_vcpu *vcpu) in vmx_inject_nmi() argument
5046 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
5048 if (is_guest_mode(vcpu)) in vmx_inject_nmi()
5064 ++vcpu->stat.nmi_injections; in vmx_inject_nmi()
5067 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) in vmx_inject_nmi()
5068 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in vmx_inject_nmi()
5075 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) in vmx_get_nmi_mask() argument
5078 return to_vmx(vcpu)->soft_vnmi_blocked; in vmx_get_nmi_mask()
5079 if (to_vmx(vcpu)->nmi_known_unmasked) in vmx_get_nmi_mask()
5084 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) in vmx_set_nmi_mask() argument
5086 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
5104 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) in vmx_nmi_allowed() argument
5106 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
5109 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) in vmx_nmi_allowed()
5117 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) in vmx_interrupt_allowed() argument
5119 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
5137 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) in rmode_exception() argument
5145 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5147 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in rmode_exception()
5151 if (vcpu->guest_debug & in rmode_exception()
5169 static int handle_rmode_exception(struct kvm_vcpu *vcpu, in handle_rmode_exception() argument
5177 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { in handle_rmode_exception()
5178 if (vcpu->arch.halt_request) { in handle_rmode_exception()
5179 vcpu->arch.halt_request = 0; in handle_rmode_exception()
5180 return kvm_vcpu_halt(vcpu); in handle_rmode_exception()
5192 kvm_queue_exception(vcpu, vec); in handle_rmode_exception()
5215 static int handle_machine_check(struct kvm_vcpu *vcpu) in handle_machine_check() argument
5221 static int handle_exception(struct kvm_vcpu *vcpu) in handle_exception() argument
5223 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception()
5224 struct kvm_run *kvm_run = vcpu->run; in handle_exception()
5234 return handle_machine_check(vcpu); in handle_exception()
5240 vmx_fpu_activate(vcpu); in handle_exception()
5245 if (is_guest_mode(vcpu)) { in handle_exception()
5246 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exception()
5249 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); in handle_exception()
5251 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exception()
5266 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_exception()
5267 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; in handle_exception()
5268 vcpu->run->internal.ndata = 3; in handle_exception()
5269 vcpu->run->internal.data[0] = vect_info; in handle_exception()
5270 vcpu->run->internal.data[1] = intr_info; in handle_exception()
5271 vcpu->run->internal.data[2] = error_code; in handle_exception()
5281 if (kvm_event_needs_reinjection(vcpu)) in handle_exception()
5282 kvm_mmu_unprotect_page_virt(vcpu, cr2); in handle_exception()
5283 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0); in handle_exception()
5288 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception()
5289 return handle_rmode_exception(vcpu, ex_no, error_code); in handle_exception()
5293 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); in handle_exception()
5297 if (!(vcpu->guest_debug & in handle_exception()
5299 vcpu->arch.dr6 &= ~15; in handle_exception()
5300 vcpu->arch.dr6 |= dr6 | DR6_RTM; in handle_exception()
5302 skip_emulated_instruction(vcpu); in handle_exception()
5304 kvm_queue_exception(vcpu, DB_VECTOR); in handle_exception()
5316 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5319 rip = kvm_rip_read(vcpu); in handle_exception()
5332 static int handle_external_interrupt(struct kvm_vcpu *vcpu) in handle_external_interrupt() argument
5334 ++vcpu->stat.irq_exits; in handle_external_interrupt()
5338 static int handle_triple_fault(struct kvm_vcpu *vcpu) in handle_triple_fault() argument
5340 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in handle_triple_fault()
5344 static int handle_io(struct kvm_vcpu *vcpu) in handle_io() argument
5354 ++vcpu->stat.io_exits; in handle_io()
5357 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_io()
5361 skip_emulated_instruction(vcpu); in handle_io()
5363 return kvm_fast_pio_out(vcpu, size, port); in handle_io()
5367 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) in vmx_patch_hypercall() argument
5377 static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) in nested_cr0_valid() argument
5380 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_cr0_valid()
5382 if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & in nested_cr0_valid()
5390 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) in handle_set_cr0() argument
5392 if (is_guest_mode(vcpu)) { in handle_set_cr0()
5393 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in handle_set_cr0()
5407 if (!nested_cr0_valid(vcpu, val)) in handle_set_cr0()
5410 if (kvm_set_cr0(vcpu, val)) in handle_set_cr0()
5415 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5418 return kvm_set_cr0(vcpu, val); in handle_set_cr0()
5422 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) in handle_set_cr4() argument
5424 if (is_guest_mode(vcpu)) { in handle_set_cr4()
5425 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in handle_set_cr4()
5431 if (kvm_set_cr4(vcpu, val)) in handle_set_cr4()
5436 return kvm_set_cr4(vcpu, val); in handle_set_cr4()
5440 static void handle_clts(struct kvm_vcpu *vcpu) in handle_clts() argument
5442 if (is_guest_mode(vcpu)) { in handle_clts()
5450 vcpu->arch.cr0 &= ~X86_CR0_TS; in handle_clts()
5452 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); in handle_clts()
5455 static int handle_cr(struct kvm_vcpu *vcpu) in handle_cr() argument
5467 val = kvm_register_readl(vcpu, reg); in handle_cr()
5471 err = handle_set_cr0(vcpu, val); in handle_cr()
5472 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5475 err = kvm_set_cr3(vcpu, val); in handle_cr()
5476 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5479 err = handle_set_cr4(vcpu, val); in handle_cr()
5480 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5483 u8 cr8_prev = kvm_get_cr8(vcpu); in handle_cr()
5485 err = kvm_set_cr8(vcpu, cr8); in handle_cr()
5486 kvm_complete_insn_gp(vcpu, err); in handle_cr()
5487 if (lapic_in_kernel(vcpu)) in handle_cr()
5491 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in handle_cr()
5497 handle_clts(vcpu); in handle_cr()
5498 trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); in handle_cr()
5499 skip_emulated_instruction(vcpu); in handle_cr()
5500 vmx_fpu_activate(vcpu); in handle_cr()
5505 val = kvm_read_cr3(vcpu); in handle_cr()
5506 kvm_register_write(vcpu, reg, val); in handle_cr()
5508 skip_emulated_instruction(vcpu); in handle_cr()
5511 val = kvm_get_cr8(vcpu); in handle_cr()
5512 kvm_register_write(vcpu, reg, val); in handle_cr()
5514 skip_emulated_instruction(vcpu); in handle_cr()
5520 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); in handle_cr()
5521 kvm_lmsw(vcpu, val); in handle_cr()
5523 skip_emulated_instruction(vcpu); in handle_cr()
5528 vcpu->run->exit_reason = 0; in handle_cr()
5529 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", in handle_cr()
5534 static int handle_dr(struct kvm_vcpu *vcpu) in handle_dr() argument
5543 if (!kvm_require_dr(vcpu, dr)) in handle_dr()
5547 if (!kvm_require_cpl(vcpu, 0)) in handle_dr()
5556 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in handle_dr()
5557 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; in handle_dr()
5558 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5559 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5560 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5561 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in handle_dr()
5564 vcpu->arch.dr6 &= ~15; in handle_dr()
5565 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; in handle_dr()
5566 kvm_queue_exception(vcpu, DB_VECTOR); in handle_dr()
5571 if (vcpu->guest_debug == 0) { in handle_dr()
5583 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5591 if (kvm_get_dr(vcpu, dr, &val)) in handle_dr()
5593 kvm_register_write(vcpu, reg, val); in handle_dr()
5595 if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) in handle_dr()
5598 skip_emulated_instruction(vcpu); in handle_dr()
5602 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) in vmx_get_dr6() argument
5604 return vcpu->arch.dr6; in vmx_get_dr6()
5607 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) in vmx_set_dr6() argument
5611 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) in vmx_sync_dirty_debug_regs() argument
5615 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5616 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5617 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5618 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5619 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5620 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5622 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
5629 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) in vmx_set_dr7() argument
5634 static int handle_cpuid(struct kvm_vcpu *vcpu) in handle_cpuid() argument
5636 kvm_emulate_cpuid(vcpu); in handle_cpuid()
5640 static int handle_rdmsr(struct kvm_vcpu *vcpu) in handle_rdmsr() argument
5642 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_rdmsr()
5647 if (vmx_get_msr(vcpu, &msr_info)) { in handle_rdmsr()
5649 kvm_inject_gp(vcpu, 0); in handle_rdmsr()
5656 vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; in handle_rdmsr()
5657 vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; in handle_rdmsr()
5658 skip_emulated_instruction(vcpu); in handle_rdmsr()
5662 static int handle_wrmsr(struct kvm_vcpu *vcpu) in handle_wrmsr() argument
5665 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_wrmsr()
5666 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) in handle_wrmsr()
5667 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); in handle_wrmsr()
5672 if (kvm_set_msr(vcpu, &msr) != 0) { in handle_wrmsr()
5674 kvm_inject_gp(vcpu, 0); in handle_wrmsr()
5679 skip_emulated_instruction(vcpu); in handle_wrmsr()
5683 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) in handle_tpr_below_threshold() argument
5685 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_tpr_below_threshold()
5689 static int handle_interrupt_window(struct kvm_vcpu *vcpu) in handle_interrupt_window() argument
5698 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_interrupt_window()
5700 ++vcpu->stat.irq_window_exits; in handle_interrupt_window()
5704 static int handle_halt(struct kvm_vcpu *vcpu) in handle_halt() argument
5706 return kvm_emulate_halt(vcpu); in handle_halt()
5709 static int handle_vmcall(struct kvm_vcpu *vcpu) in handle_vmcall() argument
5711 kvm_emulate_hypercall(vcpu); in handle_vmcall()
5715 static int handle_invd(struct kvm_vcpu *vcpu) in handle_invd() argument
5717 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_invd()
5720 static int handle_invlpg(struct kvm_vcpu *vcpu) in handle_invlpg() argument
5724 kvm_mmu_invlpg(vcpu, exit_qualification); in handle_invlpg()
5725 skip_emulated_instruction(vcpu); in handle_invlpg()
5729 static int handle_rdpmc(struct kvm_vcpu *vcpu) in handle_rdpmc() argument
5733 err = kvm_rdpmc(vcpu); in handle_rdpmc()
5734 kvm_complete_insn_gp(vcpu, err); in handle_rdpmc()
5739 static int handle_wbinvd(struct kvm_vcpu *vcpu) in handle_wbinvd() argument
5741 kvm_emulate_wbinvd(vcpu); in handle_wbinvd()
5745 static int handle_xsetbv(struct kvm_vcpu *vcpu) in handle_xsetbv() argument
5747 u64 new_bv = kvm_read_edx_eax(vcpu); in handle_xsetbv()
5748 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); in handle_xsetbv()
5750 if (kvm_set_xcr(vcpu, index, new_bv) == 0) in handle_xsetbv()
5751 skip_emulated_instruction(vcpu); in handle_xsetbv()
5755 static int handle_xsaves(struct kvm_vcpu *vcpu) in handle_xsaves() argument
5757 skip_emulated_instruction(vcpu); in handle_xsaves()
5762 static int handle_xrstors(struct kvm_vcpu *vcpu) in handle_xrstors() argument
5764 skip_emulated_instruction(vcpu); in handle_xrstors()
5769 static int handle_apic_access(struct kvm_vcpu *vcpu) in handle_apic_access() argument
5784 kvm_lapic_set_eoi(vcpu); in handle_apic_access()
5785 skip_emulated_instruction(vcpu); in handle_apic_access()
5789 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in handle_apic_access()
5792 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) in handle_apic_eoi_induced() argument
5798 kvm_apic_set_eoi_accelerated(vcpu, vector); in handle_apic_eoi_induced()
5802 static int handle_apic_write(struct kvm_vcpu *vcpu) in handle_apic_write() argument
5808 kvm_apic_write_nodecode(vcpu, offset); in handle_apic_write()
5812 static int handle_task_switch(struct kvm_vcpu *vcpu) in handle_task_switch() argument
5814 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5831 vcpu->arch.nmi_injected = false; in handle_task_switch()
5832 vmx_set_nmi_mask(vcpu, true); in handle_task_switch()
5836 kvm_clear_interrupt_queue(vcpu); in handle_task_switch()
5847 kvm_clear_exception_queue(vcpu); in handle_task_switch()
5858 skip_emulated_instruction(vcpu); in handle_task_switch()
5860 if (kvm_task_switch(vcpu, tss_selector, in handle_task_switch()
5863 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_task_switch()
5864 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_task_switch()
5865 vcpu->run->internal.ndata = 0; in handle_task_switch()
5877 static int handle_ept_violation(struct kvm_vcpu *vcpu) in handle_ept_violation() argument
5894 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_ept_violation()
5895 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; in handle_ept_violation()
5905 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5920 vcpu->arch.exit_qualification = exit_qualification; in handle_ept_violation()
5922 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); in handle_ept_violation()
5925 static int handle_ept_misconfig(struct kvm_vcpu *vcpu) in handle_ept_misconfig() argument
5931 if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { in handle_ept_misconfig()
5932 skip_emulated_instruction(vcpu); in handle_ept_misconfig()
5937 ret = handle_mmio_page_fault(vcpu, gpa, true); in handle_ept_misconfig()
5939 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == in handle_ept_misconfig()
5943 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0); in handle_ept_misconfig()
5951 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in handle_ept_misconfig()
5952 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; in handle_ept_misconfig()
5957 static int handle_nmi_window(struct kvm_vcpu *vcpu) in handle_nmi_window() argument
5965 ++vcpu->stat.nmi_window_exits; in handle_nmi_window()
5966 kvm_make_request(KVM_REQ_EVENT, vcpu); in handle_nmi_window()
5971 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) in handle_invalid_guest_state() argument
5973 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5984 if (intr_window_requested && vmx_interrupt_allowed(vcpu)) in handle_invalid_guest_state()
5985 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5987 if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) in handle_invalid_guest_state()
5990 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); in handle_invalid_guest_state()
5993 ++vcpu->stat.mmio_exits; in handle_invalid_guest_state()
5999 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_invalid_guest_state()
6000 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_invalid_guest_state()
6001 vcpu->run->internal.ndata = 0; in handle_invalid_guest_state()
6005 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
6006 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
6007 ret = kvm_vcpu_halt(vcpu); in handle_invalid_guest_state()
6049 static void grow_ple_window(struct kvm_vcpu *vcpu) in grow_ple_window() argument
6051 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
6059 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); in grow_ple_window()
6062 static void shrink_ple_window(struct kvm_vcpu *vcpu) in shrink_ple_window() argument
6064 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
6073 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); in shrink_ple_window()
6096 struct kvm_vcpu *vcpu; in wakeup_handler() local
6100 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), in wakeup_handler()
6102 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); in wakeup_handler()
6105 kvm_vcpu_kick(vcpu); in wakeup_handler()
6349 static int handle_pause(struct kvm_vcpu *vcpu) in handle_pause() argument
6352 grow_ple_window(vcpu); in handle_pause()
6354 skip_emulated_instruction(vcpu); in handle_pause()
6355 kvm_vcpu_on_spin(vcpu); in handle_pause()
6360 static int handle_nop(struct kvm_vcpu *vcpu) in handle_nop() argument
6362 skip_emulated_instruction(vcpu); in handle_nop()
6366 static int handle_mwait(struct kvm_vcpu *vcpu) in handle_mwait() argument
6369 return handle_nop(vcpu); in handle_mwait()
6372 static int handle_monitor_trap(struct kvm_vcpu *vcpu) in handle_monitor_trap() argument
6377 static int handle_monitor(struct kvm_vcpu *vcpu) in handle_monitor() argument
6380 return handle_nop(vcpu); in handle_monitor()
6475 static void nested_vmx_succeed(struct kvm_vcpu *vcpu) in nested_vmx_succeed() argument
6477 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) in nested_vmx_succeed()
6482 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) in nested_vmx_failInvalid() argument
6484 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) in nested_vmx_failInvalid()
6490 static void nested_vmx_failValid(struct kvm_vcpu *vcpu, in nested_vmx_failValid() argument
6493 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
6498 nested_vmx_failInvalid(vcpu); in nested_vmx_failValid()
6501 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) in nested_vmx_failValid()
6505 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; in nested_vmx_failValid()
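nested_vmx_succeed()/failInvalid()/failValid() above implement the VMX instruction status convention: success clears the arithmetic flags, VMfailInvalid sets CF (no current VMCS), and VMfailValid sets ZF and records the error number in the current VMCS. A compact sketch of that convention with a toy state struct (the real helpers also clear PF/AF/SF/OF, omitted here):

#define X86_EFLAGS_CF 0x0001ul
#define X86_EFLAGS_ZF 0x0040ul

struct toy_vmx_state {
        unsigned long rflags;
        unsigned int  vm_instruction_error;    /* lives in vmcs12 in the real code */
};

static void vmx_succeed(struct toy_vmx_state *s)
{
        s->rflags &= ~(X86_EFLAGS_CF | X86_EFLAGS_ZF);
}

static void vmx_fail_invalid(struct toy_vmx_state *s)          /* no current VMCS */
{
        s->rflags = (s->rflags & ~X86_EFLAGS_ZF) | X86_EFLAGS_CF;
}

static void vmx_fail_valid(struct toy_vmx_state *s, unsigned int err)
{
        s->rflags = (s->rflags & ~X86_EFLAGS_CF) | X86_EFLAGS_ZF;
        s->vm_instruction_error = err;         /* guest reads it back via VMREAD */
}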
6512 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) in nested_vmx_abort() argument
6515 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in nested_vmx_abort()
6525 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
6526 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
6537 static int get_vmx_mem_address(struct kvm_vcpu *vcpu, in get_vmx_mem_address() argument
6563 kvm_queue_exception(vcpu, UD_VECTOR); in get_vmx_mem_address()
6571 off += kvm_register_read(vcpu, base_reg); in get_vmx_mem_address()
6573 off += kvm_register_read(vcpu, index_reg)<<scaling; in get_vmx_mem_address()
6574 vmx_get_segment(vcpu, &s, seg_reg); in get_vmx_mem_address()
6582 if (is_protmode(vcpu)) { in get_vmx_mem_address()
6601 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in get_vmx_mem_address()
6604 if (is_long_mode(vcpu)) { in get_vmx_mem_address()
6609 } else if (is_protmode(vcpu)) { in get_vmx_mem_address()
6619 kvm_queue_exception_e(vcpu, in get_vmx_mem_address()
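get_vmx_mem_address() above reconstructs the guest address of a VMX instruction's memory operand from the exit qualification (the displacement) and the VM-exit instruction-information field (segment, base register, index register, scaling), roughly addr = segment_base + base + (index << scaling) + displacement, followed by the mode-dependent checks visible in the listing. A cut-down sketch of that decode, with the bit-field extraction omitted and the register file passed in as an array:

#include <stdint.h>

struct vmx_operand_info {           /* already-decoded instruction info */
        unsigned scaling;           /* index register is shifted left by this */
        int      base_reg;          /* -1 if no base register                 */
        int      index_reg;         /* -1 if no index register                */
};

static uint64_t vmx_operand_addr(const struct vmx_operand_info *ii,
                                 uint64_t seg_base, uint64_t displacement,
                                 const uint64_t regs[16])
{
        uint64_t off = displacement;               /* from EXIT_QUALIFICATION */

        if (ii->base_reg >= 0)
                off += regs[ii->base_reg];
        if (ii->index_reg >= 0)
                off += regs[ii->index_reg] << ii->scaling;

        return seg_base + off;                     /* linear address of the operand */
}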
6636 static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, in nested_vmx_check_vmptr() argument
6643 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr()
6644 int maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_vmptr()
6646 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), in nested_vmx_check_vmptr()
6650 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, in nested_vmx_check_vmptr()
6652 kvm_inject_page_fault(vcpu, &e); in nested_vmx_check_vmptr()
6669 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmptr()
6670 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6674 page = nested_get_page(vcpu, vmptr); in nested_vmx_check_vmptr()
6677 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmptr()
6679 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6687 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6689 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6694 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6696 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6702 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6704 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6709 nested_vmx_failValid(vcpu, in nested_vmx_check_vmptr()
6711 skip_emulated_instruction(vcpu); in nested_vmx_check_vmptr()
6732 static int handle_vmon(struct kvm_vcpu *vcpu) in handle_vmon() argument
6735 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon()
6745 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || in handle_vmon()
6746 !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || in handle_vmon()
6747 (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { in handle_vmon()
6748 kvm_queue_exception(vcpu, UD_VECTOR); in handle_vmon()
6752 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in handle_vmon()
6753 if (is_long_mode(vcpu) && !cs.l) { in handle_vmon()
6754 kvm_queue_exception(vcpu, UD_VECTOR); in handle_vmon()
6758 if (vmx_get_cpl(vcpu)) { in handle_vmon()
6759 kvm_inject_gp(vcpu, 0); in handle_vmon()
6763 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) in handle_vmon()
6767 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); in handle_vmon()
6768 skip_emulated_instruction(vcpu); in handle_vmon()
6774 kvm_inject_gp(vcpu, 0); in handle_vmon()
6798 skip_emulated_instruction(vcpu); in handle_vmon()
6799 nested_vmx_succeed(vcpu); in handle_vmon()
6808 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) in nested_vmx_check_permission() argument
6811 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission()
6814 kvm_queue_exception(vcpu, UD_VECTOR); in nested_vmx_check_permission()
6818 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); in nested_vmx_check_permission()
6819 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || in nested_vmx_check_permission()
6820 (is_long_mode(vcpu) && !cs.l)) { in nested_vmx_check_permission()
6821 kvm_queue_exception(vcpu, UD_VECTOR); in nested_vmx_check_permission()
6825 if (vmx_get_cpl(vcpu)) { in nested_vmx_check_permission()
6826 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
6892 static int handle_vmoff(struct kvm_vcpu *vcpu) in handle_vmoff() argument
6894 if (!nested_vmx_check_permission(vcpu)) in handle_vmoff()
6896 free_nested(to_vmx(vcpu)); in handle_vmoff()
6897 skip_emulated_instruction(vcpu); in handle_vmoff()
6898 nested_vmx_succeed(vcpu); in handle_vmoff()
6903 static int handle_vmclear(struct kvm_vcpu *vcpu) in handle_vmclear() argument
6905 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear()
6910 if (!nested_vmx_check_permission(vcpu)) in handle_vmclear()
6913 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) in handle_vmclear()
6919 page = nested_get_page(vcpu, vmptr); in handle_vmclear()
6928 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in handle_vmclear()
6938 skip_emulated_instruction(vcpu); in handle_vmclear()
6939 nested_vmx_succeed(vcpu); in handle_vmclear()
6943 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
6946 static int handle_vmlaunch(struct kvm_vcpu *vcpu) in handle_vmlaunch() argument
6948 return nested_vmx_run(vcpu, true); in handle_vmlaunch()
6952 static int handle_vmresume(struct kvm_vcpu *vcpu) in handle_vmresume() argument
6955 return nested_vmx_run(vcpu, false); in handle_vmresume()
6984 static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, in vmcs12_read_any() argument
6993 p = ((char *)(get_vmcs12(vcpu))) + offset; in vmcs12_read_any()
7015 static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, in vmcs12_write_any() argument
7018 char *p = ((char *) get_vmcs12(vcpu)) + offset; in vmcs12_write_any()
7074 vmcs12_write_any(&vmx->vcpu, field, field_value); in copy_shadow_to_vmcs12()
7103 vmcs12_read_any(&vmx->vcpu, field, &field_value); in copy_vmcs12_to_shadow()
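vmcs12_read_any()/vmcs12_write_any() above access vmcs12 fields generically: the field encoding is mapped to a byte offset and the value is copied at ((char *)vmcs12) + offset (the real code also dispatches on the field width). A simplified sketch of the offset-table technique using a toy two-field struct:

#include <stddef.h>
#include <string.h>

struct toy_vmcs {                           /* stand-in for struct vmcs12 */
        unsigned long guest_rip;
        unsigned long guest_rsp;
};

/* field id -> byte offset, like the kernel's vmcs_field_to_offset() table */
static const size_t field_offset[] = {
        [0] = offsetof(struct toy_vmcs, guest_rip),
        [1] = offsetof(struct toy_vmcs, guest_rsp),
};

static int toy_read_any(struct toy_vmcs *v, unsigned int field, unsigned long *out)
{
        if (field >= sizeof(field_offset) / sizeof(field_offset[0]))
                return -1;                                   /* unsupported field */
        memcpy(out, (char *)v + field_offset[field], sizeof(*out));
        return 0;
}

static int toy_write_any(struct toy_vmcs *v, unsigned int field, unsigned long val)
{
        if (field >= sizeof(field_offset) / sizeof(field_offset[0]))
                return -1;
        memcpy((char *)v + field_offset[field], &val, sizeof(val));
        return 0;
}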
7133 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) in nested_vmx_check_vmcs12() argument
7135 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12()
7137 nested_vmx_failInvalid(vcpu); in nested_vmx_check_vmcs12()
7138 skip_emulated_instruction(vcpu); in nested_vmx_check_vmcs12()
7144 static int handle_vmread(struct kvm_vcpu *vcpu) in handle_vmread() argument
7152 if (!nested_vmx_check_permission(vcpu) || in handle_vmread()
7153 !nested_vmx_check_vmcs12(vcpu)) in handle_vmread()
7157 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in handle_vmread()
7159 if (vmcs12_read_any(vcpu, field, &field_value) < 0) { in handle_vmread()
7160 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); in handle_vmread()
7161 skip_emulated_instruction(vcpu); in handle_vmread()
7170 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), in handle_vmread()
7173 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmread()
7177 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, in handle_vmread()
7178 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); in handle_vmread()
7181 nested_vmx_succeed(vcpu); in handle_vmread()
7182 skip_emulated_instruction(vcpu); in handle_vmread()
7187 static int handle_vmwrite(struct kvm_vcpu *vcpu) in handle_vmwrite() argument
7202 if (!nested_vmx_check_permission(vcpu) || in handle_vmwrite()
7203 !nested_vmx_check_vmcs12(vcpu)) in handle_vmwrite()
7207 field_value = kvm_register_readl(vcpu, in handle_vmwrite()
7210 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmwrite()
7213 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, in handle_vmwrite()
7214 &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { in handle_vmwrite()
7215 kvm_inject_page_fault(vcpu, &e); in handle_vmwrite()
7221 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in handle_vmwrite()
7223 nested_vmx_failValid(vcpu, in handle_vmwrite()
7225 skip_emulated_instruction(vcpu); in handle_vmwrite()
7229 if (vmcs12_write_any(vcpu, field, field_value) < 0) { in handle_vmwrite()
7230 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); in handle_vmwrite()
7231 skip_emulated_instruction(vcpu); in handle_vmwrite()
7235 nested_vmx_succeed(vcpu); in handle_vmwrite()
7236 skip_emulated_instruction(vcpu); in handle_vmwrite()
7241 static int handle_vmptrld(struct kvm_vcpu *vcpu) in handle_vmptrld() argument
7243 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld()
7246 if (!nested_vmx_check_permission(vcpu)) in handle_vmptrld()
7249 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) in handle_vmptrld()
7255 page = nested_get_page(vcpu, vmptr); in handle_vmptrld()
7257 nested_vmx_failInvalid(vcpu); in handle_vmptrld()
7258 skip_emulated_instruction(vcpu); in handle_vmptrld()
7265 nested_vmx_failValid(vcpu, in handle_vmptrld()
7267 skip_emulated_instruction(vcpu); in handle_vmptrld()
7284 nested_vmx_succeed(vcpu); in handle_vmptrld()
7285 skip_emulated_instruction(vcpu); in handle_vmptrld()
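The handle_vmptrld() lines above check permission, validate the pointer via nested_vmx_check_vmptr(), map the guest page, and in the full source reject it unless the software VMCS revision id matches before installing it as the current vmcs12. A minimal sketch of that accept/reject decision; the revision constant and structures here are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical software revision id; the kernel defines its own VMCS12_REVISION. */
#define VMCS12_REVISION_MODEL 0x12345678u

struct vmcs12_hdr { uint32_t revision_id; uint32_t abort; };

struct nested_state { uint64_t current_vmptr; struct vmcs12_hdr *current_vmcs12; };

/* Model of the handle_vmptrld() checks: 4 KiB alignment, then revision-id match. */
static int vmptrld_model(struct nested_state *n, uint64_t vmptr, struct vmcs12_hdr *mapped)
{
    if (vmptr & 0xfffull)
        return -1;                        /* VMfail: not a valid, aligned pointer */
    if (mapped->revision_id != VMCS12_REVISION_MODEL)
        return -2;                        /* VMfailValid: incorrect revision id */
    n->current_vmptr = vmptr;
    n->current_vmcs12 = mapped;
    return 0;                             /* VMsucceed */
}

int main(void)
{
    struct vmcs12_hdr blob = { .revision_id = VMCS12_REVISION_MODEL };
    struct nested_state n = { 0 };

    printf("vmptrld: %d\n", vmptrld_model(&n, 0x1000, &blob));   /* 0  */
    blob.revision_id ^= 1;
    printf("vmptrld: %d\n", vmptrld_model(&n, 0x1000, &blob));   /* -2 */
    return 0;
}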
7290 static int handle_vmptrst(struct kvm_vcpu *vcpu) in handle_vmptrst() argument
7297 if (!nested_vmx_check_permission(vcpu)) in handle_vmptrst()
7300 if (get_vmx_mem_address(vcpu, exit_qualification, in handle_vmptrst()
7304 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, in handle_vmptrst()
7305 (void *)&to_vmx(vcpu)->nested.current_vmptr, in handle_vmptrst()
7307 kvm_inject_page_fault(vcpu, &e); in handle_vmptrst()
7310 nested_vmx_succeed(vcpu); in handle_vmptrst()
7311 skip_emulated_instruction(vcpu); in handle_vmptrst()
7316 static int handle_invept(struct kvm_vcpu *vcpu) in handle_invept() argument
7318 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept()
7330 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invept()
7334 if (!nested_vmx_check_permission(vcpu)) in handle_invept()
7337 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { in handle_invept()
7338 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invept()
7343 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); in handle_invept()
7348 nested_vmx_failValid(vcpu, in handle_invept()
7350 skip_emulated_instruction(vcpu); in handle_invept()
7357 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), in handle_invept()
7360 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, in handle_invept()
7362 kvm_inject_page_fault(vcpu, &e); in handle_invept()
7368 kvm_mmu_sync_roots(vcpu); in handle_invept()
7369 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in handle_invept()
7370 nested_vmx_succeed(vcpu); in handle_invept()
7378 skip_emulated_instruction(vcpu); in handle_invept()
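The handle_invept() lines above check nested-VMX permission, validate the requested invalidation type against the EPT capabilities exposed to L1, read the 128-bit operand from guest memory, then sync the shadow roots and request a TLB flush before signalling VMsucceed; the listed lines show the global-flush path. A small self-contained model of the type check; the capability mask, operand value, and type constants are re-declared locally as assumptions.

#include <stdint.h>
#include <stdio.h>

enum { INVEPT_SINGLE_CONTEXT = 1, INVEPT_GLOBAL = 2 };   /* assumed extent numbers */

/* Operand format the kernel handler reads from guest memory. */
struct invept_operand { uint64_t eptp; uint64_t reserved; };

/* Reject types the (hypothetical) capability mask does not advertise; every
 * supported type is then treated as "sync roots + flush", as the lines above do. */
static int invept_model(uint32_t supported_types, uint32_t type,
                        const struct invept_operand *op)
{
    if (!(supported_types & (1u << type)))
        return -1;                 /* VMfailValid: invalid operand to INVEPT/INVVPID */
    (void)op;                      /* the eptp is not needed for a global flush */
    /* kernel: kvm_mmu_sync_roots() + KVM_REQ_TLB_FLUSH, then VMsucceed */
    return 0;
}

int main(void)
{
    struct invept_operand op = { .eptp = 0x1000, .reserved = 0 };   /* hypothetical EPTP */
    uint32_t caps = (1u << INVEPT_SINGLE_CONTEXT) | (1u << INVEPT_GLOBAL);

    printf("global flush: %d\n", invept_model(caps, INVEPT_GLOBAL, &op));
    printf("bogus type:   %d\n", invept_model(caps, 7, &op));
    return 0;
}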
7382 static int handle_invvpid(struct kvm_vcpu *vcpu) in handle_invvpid() argument
7384 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid()
7394 kvm_queue_exception(vcpu, UD_VECTOR); in handle_invvpid()
7398 if (!nested_vmx_check_permission(vcpu)) in handle_invvpid()
7402 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); in handle_invvpid()
7407 nested_vmx_failValid(vcpu, in handle_invvpid()
7409 skip_emulated_instruction(vcpu); in handle_invvpid()
7416 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), in handle_invvpid()
7419 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, in handle_invvpid()
7421 kvm_inject_page_fault(vcpu, &e); in handle_invvpid()
7432 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); in handle_invvpid()
7433 nested_vmx_succeed(vcpu); in handle_invvpid()
7441 skip_emulated_instruction(vcpu); in handle_invvpid()
7445 static int handle_pml_full(struct kvm_vcpu *vcpu) in handle_pml_full() argument
7449 trace_kvm_pml_full(vcpu->vcpu_id); in handle_pml_full()
7457 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
7470 static int handle_pcommit(struct kvm_vcpu *vcpu) in handle_pcommit() argument
7482 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
7533 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_io() argument
7563 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) in nested_vmx_exit_handled_io()
7582 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_msr() argument
7585 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; in nested_vmx_exit_handled_msr()
7607 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) in nested_vmx_exit_handled_msr()
7619 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, in nested_vmx_exit_handled_cr() argument
7625 unsigned long val = kvm_register_readl(vcpu, reg); in nested_vmx_exit_handled_cr()
7700 static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) in nested_vmx_exit_handled() argument
7703 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled()
7704 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_exit_handled()
7707 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, in nested_vmx_exit_handled()
7745 if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) in nested_vmx_exit_handled()
7770 return nested_vmx_exit_handled_cr(vcpu, vmcs12); in nested_vmx_exit_handled()
7774 return nested_vmx_exit_handled_io(vcpu, vmcs12); in nested_vmx_exit_handled()
7777 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); in nested_vmx_exit_handled()
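The nested_vmx_exit_handled_msr() lines above decide whether an L2 RDMSR/WRMSR exit belongs to L1 by consulting the MSR bitmap L1 supplied: the 4 KiB page is split into read/write halves and low/high MSR ranges, and a single guest byte is fetched to test one bit. A self-contained sketch of that index computation, assuming the architectural layout (read-low at offset 0x000, read-high at 0x400, write-low at 0x800, write-high at 0xC00):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Returns true if the given MSR access should cause a VM exit to L1,
 * i.e. if its bit is set in L1's MSR bitmap page. */
static bool msr_bitmap_hits(const uint8_t bitmap[4096], uint32_t msr, bool is_write)
{
    size_t base = is_write ? 0x800 : 0x000;     /* write halves sit in the upper 2 KiB */

    if (msr >= 0xc0000000u) {                   /* "high" MSR range */
        msr -= 0xc0000000u;
        base += 0x400;
    }
    if (msr > 0x1fffu)
        return true;                            /* outside both ranges: always exit */

    return (bitmap[base + msr / 8] >> (msr & 7)) & 1;
}

int main(void)
{
    static uint8_t bitmap[4096];

    /* Mark WRMSR to IA32_EFER (0xc0000080) as intercepted. */
    uint32_t efer_off = 0xc0000080u - 0xc0000000u;
    bitmap[0x800 + 0x400 + efer_off / 8] |= (uint8_t)(1u << (efer_off & 7));

    printf("WRMSR EFER exits to L1: %d\n", msr_bitmap_hits(bitmap, 0xc0000080u, true));
    printf("RDMSR EFER exits to L1: %d\n", msr_bitmap_hits(bitmap, 0xc0000080u, false));
    return 0;
}

Fetching only one byte with kvm_vcpu_read_guest(), as line 7607 does, keeps this hot-path check cheap even though the bitmap lives in L1 guest memory.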
7836 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) in vmx_get_exit_info() argument
7866 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) in vmx_flush_pml_buffer() argument
7868 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
7890 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); in vmx_flush_pml_buffer()
7904 struct kvm_vcpu *vcpu; in kvm_flush_pml_buffers() local
7911 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_flush_pml_buffers()
7912 kvm_vcpu_kick(vcpu); in kvm_flush_pml_buffers()
8074 static int vmx_handle_exit(struct kvm_vcpu *vcpu) in vmx_handle_exit() argument
8076 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
8080 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); in vmx_handle_exit()
8090 vmx_flush_pml_buffer(vcpu); in vmx_handle_exit()
8094 return handle_invalid_guest_state(vcpu); in vmx_handle_exit()
8096 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { in vmx_handle_exit()
8097 nested_vmx_vmexit(vcpu, exit_reason, in vmx_handle_exit()
8105 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
8106 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
8112 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
8113 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
8129 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_handle_exit()
8130 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; in vmx_handle_exit()
8131 vcpu->run->internal.ndata = 2; in vmx_handle_exit()
8132 vcpu->run->internal.data[0] = vectoring_info; in vmx_handle_exit()
8133 vcpu->run->internal.data[1] = exit_reason; in vmx_handle_exit()
8138 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( in vmx_handle_exit()
8139 get_vmcs12(vcpu))))) { in vmx_handle_exit()
8140 if (vmx_interrupt_allowed(vcpu)) { in vmx_handle_exit()
8143 vcpu->arch.nmi_pending) { in vmx_handle_exit()
8152 __func__, vcpu->vcpu_id); in vmx_handle_exit()
8159 return kvm_vmx_exit_handlers[exit_reason](vcpu); in vmx_handle_exit()
8162 kvm_queue_exception(vcpu, UD_VECTOR); in vmx_handle_exit()
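The vmx_handle_exit() lines above end by indexing kvm_vmx_exit_handlers[] with the exit reason and calling the per-reason handler, falling back to injecting #UD when no handler exists. A tiny model of that table dispatch; the handler set, context struct, and exit-reason values used below are illustrative only.

#include <stdio.h>

struct vcpu_model { int pending_ud; };

typedef int (*exit_handler_t)(struct vcpu_model *v);

static int handle_cpuid_model(struct vcpu_model *v) { (void)v; puts("CPUID"); return 1; }
static int handle_hlt_model(struct vcpu_model *v)   { (void)v; puts("HLT");   return 1; }

/* Sparse table indexed by exit reason; NULL means "no handler". */
static const exit_handler_t handlers[] = {
    [10] = handle_cpuid_model,      /* EXIT_REASON_CPUID */
    [12] = handle_hlt_model,        /* EXIT_REASON_HLT   */
};

static int handle_exit_model(struct vcpu_model *v, unsigned int exit_reason)
{
    if (exit_reason < sizeof(handlers) / sizeof(handlers[0]) && handlers[exit_reason])
        return handlers[exit_reason](v);

    /* Unexpected exit reason: the kernel warns and queues #UD for the guest. */
    v->pending_ud = 1;
    return 1;
}

int main(void)
{
    struct vcpu_model v = { 0 };

    handle_exit_model(&v, 10);
    handle_exit_model(&v, 99);
    printf("pending #UD: %d\n", v.pending_ud);
    return 0;
}

A sparse array of function pointers keeps the dispatch O(1) per exit while leaving unimplemented reasons on the explicit fallback path.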
8167 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) in update_cr8_intercept() argument
8169 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in update_cr8_intercept()
8171 if (is_guest_mode(vcpu) && in update_cr8_intercept()
8183 static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) in vmx_set_virtual_x2apic_mode() argument
8192 !vmx_cpu_uses_apicv(vcpu)) in vmx_set_virtual_x2apic_mode()
8195 if (!cpu_need_tpr_shadow(vcpu)) in vmx_set_virtual_x2apic_mode()
8209 vmx_set_msr_bitmap(vcpu); in vmx_set_virtual_x2apic_mode()
8212 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) in vmx_set_apic_access_page_addr() argument
8214 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr()
8229 if (!is_guest_mode(vcpu) || in vmx_set_apic_access_page_addr()
8269 static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) in vmx_hwapic_irr_update() argument
8271 if (!is_guest_mode(vcpu)) { in vmx_hwapic_irr_update()
8283 if (nested_exit_on_intr(vcpu)) in vmx_hwapic_irr_update()
8290 if (!kvm_event_needs_reinjection(vcpu) && in vmx_hwapic_irr_update()
8291 vmx_interrupt_allowed(vcpu)) { in vmx_hwapic_irr_update()
8292 kvm_queue_interrupt(vcpu, max_irr, false); in vmx_hwapic_irr_update()
8293 vmx_inject_irq(vcpu); in vmx_hwapic_irr_update()
8297 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vmx_load_eoi_exitmap() argument
8299 u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap; in vmx_load_eoi_exitmap()
8300 if (!vmx_cpu_uses_apicv(vcpu)) in vmx_load_eoi_exitmap()
8327 kvm_before_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
8329 kvm_after_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
8333 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) in vmx_handle_external_intr() argument
8347 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr()
8438 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, in __vmx_complete_interrupts() argument
8449 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
8450 kvm_clear_exception_queue(vcpu); in __vmx_complete_interrupts()
8451 kvm_clear_interrupt_queue(vcpu); in __vmx_complete_interrupts()
8456 kvm_make_request(KVM_REQ_EVENT, vcpu); in __vmx_complete_interrupts()
8463 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
8469 vmx_set_nmi_mask(vcpu, false); in __vmx_complete_interrupts()
8472 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8477 kvm_requeue_exception_e(vcpu, vector, err); in __vmx_complete_interrupts()
8479 kvm_requeue_exception(vcpu, vector); in __vmx_complete_interrupts()
8482 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8485 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); in __vmx_complete_interrupts()
8494 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
8499 static void vmx_cancel_injection(struct kvm_vcpu *vcpu) in vmx_cancel_injection() argument
8501 __vmx_complete_interrupts(vcpu, in vmx_cancel_injection()
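The __vmx_complete_interrupts() lines above re-queue an event that was in flight when the VM exit happened by decoding the interruption-information word: bits 7:0 carry the vector, bits 10:8 the type, bit 11 says whether an error code is present, and bit 31 whether the word is valid. A small sketch of that decode, using the architectural field layout with the constants re-declared locally:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define INTR_INFO_VECTOR_MASK   0xffu
#define INTR_INFO_TYPE_MASK     0x700u
#define INTR_INFO_DELIVER_CODE  (1u << 11)
#define INTR_INFO_VALID         (1u << 31)

enum intr_type { EXT_INTR = 0, NMI = 2, HARD_EXCEPTION = 3, SOFT_INTR = 4,
                 PRIV_SW_EXCEPTION = 5, SOFT_EXCEPTION = 6 };

struct decoded_event {
    bool     valid;
    uint8_t  vector;
    unsigned type;
    bool     has_error_code;
};

static struct decoded_event decode_intr_info(uint32_t info)
{
    struct decoded_event e = { 0 };

    e.valid = info & INTR_INFO_VALID;
    if (!e.valid)
        return e;                               /* nothing to re-inject */
    e.vector         = info & INTR_INFO_VECTOR_MASK;
    e.type           = (info & INTR_INFO_TYPE_MASK) >> 8;
    e.has_error_code = info & INTR_INFO_DELIVER_CODE;
    return e;
}

int main(void)
{
    /* A page fault (#PF, vector 14) being delivered with an error code. */
    uint32_t info = INTR_INFO_VALID | INTR_INFO_DELIVER_CODE | (HARD_EXCEPTION << 8) | 14;
    struct decoded_event e = decode_intr_info(info);

    printf("valid=%d vector=%u type=%u err=%d\n",
           e.valid, (unsigned)e.vector, e.type, e.has_error_code);
    return 0;
}

The type field is what steers the kernel between nmi_injected, kvm_requeue_exception() and kvm_queue_interrupt() in the lines above; soft events additionally need the saved instruction length so they can be replayed.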
8527 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) in vmx_vcpu_run() argument
8529 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
8551 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8552 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
8553 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8554 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
8567 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in vmx_vcpu_run()
8568 vmx_set_interrupt_shadow(vcpu, 0); in vmx_vcpu_run()
8651 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), in vmx_vcpu_run()
8652 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), in vmx_vcpu_run()
8653 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), in vmx_vcpu_run()
8654 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), in vmx_vcpu_run()
8655 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), in vmx_vcpu_run()
8656 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), in vmx_vcpu_run()
8657 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), in vmx_vcpu_run()
8659 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), in vmx_vcpu_run()
8660 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), in vmx_vcpu_run()
8661 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), in vmx_vcpu_run()
8662 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), in vmx_vcpu_run()
8663 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), in vmx_vcpu_run()
8664 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), in vmx_vcpu_run()
8665 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), in vmx_vcpu_run()
8666 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), in vmx_vcpu_run()
8668 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), in vmx_vcpu_run()
8696 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) in vmx_vcpu_run()
8701 vcpu->arch.regs_dirty = 0; in vmx_vcpu_run()
8715 kvm_make_request(KVM_REQ_EVENT, vcpu); in vmx_vcpu_run()
8724 static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) in vmx_load_vmcs01() argument
8726 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01()
8734 vmx_vcpu_put(vcpu); in vmx_load_vmcs01()
8735 vmx_vcpu_load(vcpu, cpu); in vmx_load_vmcs01()
8736 vcpu->cpu = cpu; in vmx_load_vmcs01()
8740 static void vmx_free_vcpu(struct kvm_vcpu *vcpu) in vmx_free_vcpu() argument
8742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
8747 leave_guest_mode(vcpu); in vmx_free_vcpu()
8748 vmx_load_vmcs01(vcpu); in vmx_free_vcpu()
8752 kvm_vcpu_uninit(vcpu); in vmx_free_vcpu()
8767 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
8791 vmx_vcpu_load(&vmx->vcpu, cpu); in vmx_create_vcpu()
8792 vmx->vcpu.cpu = cpu; in vmx_create_vcpu()
8794 vmx_vcpu_put(&vmx->vcpu); in vmx_create_vcpu()
8798 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { in vmx_create_vcpu()
8834 return &vmx->vcpu; in vmx_create_vcpu()
8842 kvm_vcpu_uninit(&vmx->vcpu); in vmx_create_vcpu()
8868 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in vmx_get_mt_mask() argument
8889 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { in vmx_get_mt_mask()
8895 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { in vmx_get_mt_mask()
8897 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in vmx_get_mt_mask()
8904 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); in vmx_get_mt_mask()
8938 static void vmx_cpuid_update(struct kvm_vcpu *vcpu) in vmx_cpuid_update() argument
8941 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
8945 bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu); in vmx_cpuid_update()
8960 best = kvm_find_cpuid_entry(vcpu, 0x7, 0); in vmx_cpuid_update()
8963 !guest_cpuid_has_pcid(vcpu))) { in vmx_cpuid_update()
8974 if (guest_cpuid_has_pcommit(vcpu)) in vmx_cpuid_update()
8989 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, in nested_ept_inject_page_fault() argument
8992 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_ept_inject_page_fault()
8999 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); in nested_ept_inject_page_fault()
9005 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) in nested_ept_get_cr3() argument
9008 return get_vmcs12(vcpu)->ept_pointer; in nested_ept_get_cr3()
9011 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) in nested_ept_init_mmu_context() argument
9013 WARN_ON(mmu_is_nested(vcpu)); in nested_ept_init_mmu_context()
9014 kvm_init_shadow_ept_mmu(vcpu, in nested_ept_init_mmu_context()
9015 to_vmx(vcpu)->nested.nested_vmx_ept_caps & in nested_ept_init_mmu_context()
9017 vcpu->arch.mmu.set_cr3 = vmx_set_cr3; in nested_ept_init_mmu_context()
9018 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; in nested_ept_init_mmu_context()
9019 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
9021 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
9024 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) in nested_ept_uninit_mmu_context() argument
9026 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_ept_uninit_mmu_context()
9041 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, in vmx_inject_page_fault_nested() argument
9044 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in vmx_inject_page_fault_nested()
9046 WARN_ON(!is_guest_mode(vcpu)); in vmx_inject_page_fault_nested()
9049 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in vmx_inject_page_fault_nested()
9053 kvm_inject_page_fault(vcpu, fault); in vmx_inject_page_fault_nested()
9056 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, in nested_get_vmcs12_pages() argument
9059 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages()
9060 int maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_get_vmcs12_pages()
9076 nested_get_page(vcpu, vmcs12->apic_access_addr); in nested_get_vmcs12_pages()
9087 nested_get_page(vcpu, vmcs12->virtual_apic_page_addr); in nested_get_vmcs12_pages()
9113 nested_get_page(vcpu, vmcs12->posted_intr_desc_addr); in nested_get_vmcs12_pages()
9132 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) in vmx_start_preemption_timer() argument
9134 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; in vmx_start_preemption_timer()
9135 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer()
9137 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
9149 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
9154 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_bitmap_controls() argument
9163 if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) { in nested_vmx_check_msr_bitmap_controls()
9167 maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_msr_bitmap_controls()
9180 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, in nested_vmx_merge_msr_bitmap() argument
9190 page = nested_get_page(vcpu, vmcs12->msr_bitmap); in nested_vmx_merge_msr_bitmap()
9259 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_apicv_controls() argument
9281 !nested_exit_on_intr(vcpu)) in nested_vmx_check_apicv_controls()
9291 !nested_exit_intr_ack_set(vcpu) || in nested_vmx_check_apicv_controls()
9302 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_switch() argument
9309 if (vmcs12_read_any(vcpu, count_field, &count) || in nested_vmx_check_msr_switch()
9310 vmcs12_read_any(vcpu, addr_field, &addr)) { in nested_vmx_check_msr_switch()
9316 maxphyaddr = cpuid_maxphyaddr(vcpu); in nested_vmx_check_msr_switch()
9327 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, in nested_vmx_check_msr_switch_controls() argument
9334 if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, in nested_vmx_check_msr_switch_controls()
9336 nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, in nested_vmx_check_msr_switch_controls()
9338 nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, in nested_vmx_check_msr_switch_controls()
9344 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, in nested_vmx_msr_check_common() argument
9348 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) in nested_vmx_msr_check_common()
9358 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, in nested_vmx_load_msr_check() argument
9364 nested_vmx_msr_check_common(vcpu, e)) in nested_vmx_load_msr_check()
9369 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, in nested_vmx_store_msr_check() argument
9373 nested_vmx_msr_check_common(vcpu, e)) in nested_vmx_store_msr_check()
9382 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) in nested_vmx_load_msr() argument
9390 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), in nested_vmx_load_msr()
9397 if (nested_vmx_load_msr_check(vcpu, &e)) { in nested_vmx_load_msr()
9405 if (kvm_set_msr(vcpu, &msr)) { in nested_vmx_load_msr()
9417 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) in nested_vmx_store_msr() argument
9424 if (kvm_vcpu_read_guest(vcpu, in nested_vmx_store_msr()
9432 if (nested_vmx_store_msr_check(vcpu, &e)) { in nested_vmx_store_msr()
9440 if (kvm_get_msr(vcpu, &msr_info)) { in nested_vmx_store_msr()
9446 if (kvm_vcpu_write_guest(vcpu, in nested_vmx_store_msr()
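The nested_vmx_load_msr()/nested_vmx_store_msr() lines above walk the MSR load/store areas that L1 placed in guest memory: an array of 16-byte entries (u32 index, u32 reserved, u64 value), each read with kvm_vcpu_read_guest(), validated, and fed to the normal MSR set/get path; on failure the real load function reports the 1-based index of the offending entry. A self-contained sketch of that walk over a local array, with guest-memory access and kvm_set_msr() replaced by stubs:

#include <stdint.h>
#include <stdio.h>

/* Entry format of the VMX MSR load/store areas. */
struct vmx_msr_entry { uint32_t index; uint32_t reserved; uint64_t value; };

/* Stub for the per-MSR write path (kvm_set_msr() in the kernel). */
static int set_msr_stub(uint32_t index, uint64_t value)
{
    (void)value;
    return index == 0xdeadbeefu ? -1 : 0;       /* pretend this one MSR is rejected */
}

/* Returns 0 on success, or the 1-based index of the first entry that failed,
 * mirroring how the kernel reports MSR-load failures to L1. */
static uint32_t load_msr_list(const struct vmx_msr_entry *e, uint32_t count)
{
    for (uint32_t i = 0; i < count; i++) {
        if (e[i].reserved != 0)                 /* reserved field must be zero */
            return i + 1;
        if (set_msr_stub(e[i].index, e[i].value))
            return i + 1;
    }
    return 0;
}

int main(void)
{
    struct vmx_msr_entry list[] = {
        { .index = 0xc0000080u, .reserved = 0, .value = 0x500 },   /* IA32_EFER */
        { .index = 0xdeadbeefu, .reserved = 0, .value = 0 },       /* hypothetical bad entry */
    };

    printf("failed at entry %u\n", load_msr_list(list, 2));        /* prints 2 */
    return 0;
}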
9468 static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in prepare_vmcs02() argument
9470 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02()
9511 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); in prepare_vmcs02()
9514 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
9526 vmx_set_rflags(vcpu, vmcs12->guest_rflags); in prepare_vmcs02()
9559 vmx_start_preemption_timer(vcpu); in prepare_vmcs02()
9613 cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { in prepare_vmcs02()
9616 kvm_vcpu_reload_apic_access_page(vcpu); in prepare_vmcs02()
9667 nested_vmx_merge_msr_bitmap(vcpu, vmcs12); in prepare_vmcs02()
9685 update_exception_bitmap(vcpu); in prepare_vmcs02()
9686 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
9687 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
9705 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
9707 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9734 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); in prepare_vmcs02()
9738 vmx_flush_tlb(vcpu); in prepare_vmcs02()
9744 kvm_mmu_unload(vcpu); in prepare_vmcs02()
9745 nested_ept_init_mmu_context(vcpu); in prepare_vmcs02()
9749 vcpu->arch.efer = vmcs12->guest_ia32_efer; in prepare_vmcs02()
9751 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in prepare_vmcs02()
9753 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in prepare_vmcs02()
9755 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
9765 vmx_set_cr0(vcpu, vmcs12->guest_cr0); in prepare_vmcs02()
9768 vmx_set_cr4(vcpu, vmcs12->guest_cr4); in prepare_vmcs02()
9772 kvm_set_cr3(vcpu, vmcs12->guest_cr3); in prepare_vmcs02()
9773 kvm_mmu_reset_context(vcpu); in prepare_vmcs02()
9776 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; in prepare_vmcs02()
9788 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); in prepare_vmcs02()
9789 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); in prepare_vmcs02()
9796 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) in nested_vmx_run() argument
9799 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run()
9805 if (!nested_vmx_check_permission(vcpu) || in nested_vmx_run()
9806 !nested_vmx_check_vmcs12(vcpu)) in nested_vmx_run()
9809 skip_emulated_instruction(vcpu); in nested_vmx_run()
9810 vmcs12 = get_vmcs12(vcpu); in nested_vmx_run()
9826 nested_vmx_failValid(vcpu, in nested_vmx_run()
9834 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9838 if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { in nested_vmx_run()
9839 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9843 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) { in nested_vmx_run()
9844 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9848 if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) { in nested_vmx_run()
9849 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9853 if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) { in nested_vmx_run()
9854 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9874 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); in nested_vmx_run()
9880 nested_vmx_failValid(vcpu, in nested_vmx_run()
9885 if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) || in nested_vmx_run()
9887 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9892 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9908 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || in nested_vmx_run()
9912 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9927 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || in nested_vmx_run()
9930 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9945 enter_guest_mode(vcpu); in nested_vmx_run()
9954 vmx_vcpu_put(vcpu); in nested_vmx_run()
9955 vmx_vcpu_load(vcpu, cpu); in nested_vmx_run()
9956 vcpu->cpu = cpu; in nested_vmx_run()
9961 prepare_vmcs02(vcpu, vmcs12); in nested_vmx_run()
9963 msr_entry_idx = nested_vmx_load_msr(vcpu, in nested_vmx_run()
9967 leave_guest_mode(vcpu); in nested_vmx_run()
9968 vmx_load_vmcs01(vcpu); in nested_vmx_run()
9969 nested_vmx_entry_failure(vcpu, vmcs12, in nested_vmx_run()
9977 return kvm_vcpu_halt(vcpu); in nested_vmx_run()
10008 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmcs12_guest_cr0() argument
10011 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
10014 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
10018 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) in vmcs12_guest_cr4() argument
10021 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
10024 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
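The vmcs12_guest_cr0()/vmcs12_guest_cr4() lines above rebuild the control register value L1 should see on VM exit by mixing sources according to the guest/host mask: bits L2 owned come from the hardware register, while trapped bits come from the shadow value the guest was shown (the real function further splits the trapped half between L0 and L1). A reduced two-source sketch of the mask-based merge:

#include <stdint.h>
#include <stdio.h>

/* For every bit: take it from 'hw' if the guest owns it (mask bit clear),
 * otherwise from the read shadow the guest was shown. */
static uint64_t merge_cr(uint64_t hw, uint64_t read_shadow, uint64_t guest_host_mask)
{
    return (hw & ~guest_host_mask) | (read_shadow & guest_host_mask);
}

int main(void)
{
    uint64_t hw          = 0x80050033ull;   /* what the CPU actually ran with      */
    uint64_t read_shadow = 0x80050031ull;   /* what the guest believes it wrote    */
    uint64_t mask        = 0x00000002ull;   /* hypothetical: only CR0.MP is trapped */

    printf("guest-visible CR0 = 0x%llx\n",
           (unsigned long long)merge_cr(hw, read_shadow, mask));
    return 0;
}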
10027 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, in vmcs12_save_pending_event() argument
10033 if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { in vmcs12_save_pending_event()
10034 nr = vcpu->arch.exception.nr; in vmcs12_save_pending_event()
10039 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
10044 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
10047 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
10051 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
10054 } else if (vcpu->arch.interrupt.pending) { in vmcs12_save_pending_event()
10055 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
10058 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
10061 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
10069 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) in vmx_check_nested_events() argument
10071 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events()
10073 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && in vmx_check_nested_events()
10077 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
10081 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { in vmx_check_nested_events()
10083 vcpu->arch.interrupt.pending) in vmx_check_nested_events()
10085 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, in vmx_check_nested_events()
10092 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
10093 vmx_set_nmi_mask(vcpu, true); in vmx_check_nested_events()
10097 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && in vmx_check_nested_events()
10098 nested_exit_on_intr(vcpu)) { in vmx_check_nested_events()
10101 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
10105 return vmx_complete_nested_posted_interrupt(vcpu); in vmx_check_nested_events()
10108 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) in vmx_get_preemption_timer_value() argument
10111 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
10117 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
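vmx_start_preemption_timer() and vmx_get_preemption_timer_value(), referenced above, convert between guest VMX-preemption-timer ticks and host nanoseconds: one tick is 2^rate TSC cycles, so ns = (ticks << rate) * 1e6 / tsc_khz on entry and the reverse on exit. A small sketch of the two conversions; the rate value below is an assumption standing in for the rate the kernel advertises through IA32_VMX_MISC.

#include <stdint.h>
#include <stdio.h>

/* Assumed TSC-to-preemption-timer shift. */
#define PREEMPTION_TIMER_RATE 5

/* Guest timer ticks -> host nanoseconds (used when arming the hrtimer). */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t tsc_khz)
{
    return (ticks << PREEMPTION_TIMER_RATE) * 1000000ull / tsc_khz;
}

/* Remaining hrtimer nanoseconds -> guest timer ticks (saved back on VM exit). */
static uint64_t ns_to_ticks(uint64_t ns, uint64_t tsc_khz)
{
    return (ns * tsc_khz / 1000000ull) >> PREEMPTION_TIMER_RATE;
}

int main(void)
{
    uint64_t tsc_khz = 2400000;                 /* 2.4 GHz guest TSC */
    uint64_t ticks   = 750000;
    uint64_t ns      = ticks_to_ns(ticks, tsc_khz);

    printf("%llu ticks = %llu ns, back to %llu ticks\n",
           (unsigned long long)ticks, (unsigned long long)ns,
           (unsigned long long)ns_to_ticks(ns, tsc_khz));
    return 0;
}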
10133 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, in prepare_vmcs12() argument
10138 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); in prepare_vmcs12()
10139 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); in prepare_vmcs12()
10141 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); in prepare_vmcs12()
10142 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); in prepare_vmcs12()
10186 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in prepare_vmcs12()
10195 vmx_get_preemption_timer_value(vcpu); in prepare_vmcs12()
10196 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in prepare_vmcs12()
10220 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); in prepare_vmcs12()
10223 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); in prepare_vmcs12()
10232 vmcs12->guest_ia32_efer = vcpu->arch.efer; in prepare_vmcs12()
10265 vmcs12_save_pending_event(vcpu, vmcs12); in prepare_vmcs12()
10272 vcpu->arch.nmi_injected = false; in prepare_vmcs12()
10273 kvm_clear_exception_queue(vcpu); in prepare_vmcs12()
10274 kvm_clear_interrupt_queue(vcpu); in prepare_vmcs12()
10286 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, in load_vmcs12_host_state() argument
10292 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
10294 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
10296 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
10297 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
10299 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); in load_vmcs12_host_state()
10300 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); in load_vmcs12_host_state()
10301 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); in load_vmcs12_host_state()
10308 vmx_set_cr0(vcpu, vmcs12->host_cr0); in load_vmcs12_host_state()
10314 update_exception_bitmap(vcpu); in load_vmcs12_host_state()
10315 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); in load_vmcs12_host_state()
10316 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in load_vmcs12_host_state()
10322 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
10323 kvm_set_cr4(vcpu, vmcs12->host_cr4); in load_vmcs12_host_state()
10325 nested_ept_uninit_mmu_context(vcpu); in load_vmcs12_host_state()
10327 kvm_set_cr3(vcpu, vmcs12->host_cr3); in load_vmcs12_host_state()
10328 kvm_mmu_reset_context(vcpu); in load_vmcs12_host_state()
10331 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; in load_vmcs12_host_state()
10339 vmx_flush_tlb(vcpu); in load_vmcs12_host_state()
10355 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
10376 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); in load_vmcs12_host_state()
10387 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); in load_vmcs12_host_state()
10389 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); in load_vmcs12_host_state()
10391 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); in load_vmcs12_host_state()
10394 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); in load_vmcs12_host_state()
10397 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); in load_vmcs12_host_state()
10405 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); in load_vmcs12_host_state()
10407 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
10411 vmx_set_msr_bitmap(vcpu); in load_vmcs12_host_state()
10413 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, in load_vmcs12_host_state()
10415 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); in load_vmcs12_host_state()
10423 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, in nested_vmx_vmexit() argument
10427 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit()
10428 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); in nested_vmx_vmexit()
10433 leave_guest_mode(vcpu); in nested_vmx_vmexit()
10434 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, in nested_vmx_vmexit()
10437 if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, in nested_vmx_vmexit()
10439 nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); in nested_vmx_vmexit()
10441 vmx_load_vmcs01(vcpu); in nested_vmx_vmexit()
10444 && nested_exit_intr_ack_set(vcpu)) { in nested_vmx_vmexit()
10445 int irq = kvm_cpu_get_interrupt(vcpu); in nested_vmx_vmexit()
10466 load_vmcs12_host_state(vcpu, vmcs12); in nested_vmx_vmexit()
10494 kvm_vcpu_reload_apic_access_page(vcpu); in nested_vmx_vmexit()
10503 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); in nested_vmx_vmexit()
10505 nested_vmx_succeed(vcpu); in nested_vmx_vmexit()
10510 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_vmx_vmexit()
10516 static void vmx_leave_nested(struct kvm_vcpu *vcpu) in vmx_leave_nested() argument
10518 if (is_guest_mode(vcpu)) in vmx_leave_nested()
10519 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
10520 free_nested(to_vmx(vcpu)); in vmx_leave_nested()
10530 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, in nested_vmx_entry_failure() argument
10534 load_vmcs12_host_state(vcpu, vmcs12); in nested_vmx_entry_failure()
10537 nested_vmx_succeed(vcpu); in nested_vmx_entry_failure()
10539 to_vmx(vcpu)->nested.sync_shadow_vmcs = true; in nested_vmx_entry_failure()
10542 static int vmx_check_intercept(struct kvm_vcpu *vcpu, in vmx_check_intercept() argument
10549 static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) in vmx_sched_in() argument
10552 shrink_ple_window(vcpu); in vmx_sched_in()
10593 static int vmx_pre_block(struct kvm_vcpu *vcpu) in vmx_pre_block() argument
10598 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); in vmx_pre_block()
10600 if (!kvm_arch_has_assigned_device(vcpu->kvm) || in vmx_pre_block()
10604 vcpu->pre_pcpu = vcpu->cpu; in vmx_pre_block()
10606 vcpu->pre_pcpu), flags); in vmx_pre_block()
10607 list_add_tail(&vcpu->blocked_vcpu_list, in vmx_pre_block()
10609 vcpu->pre_pcpu)); in vmx_pre_block()
10611 vcpu->pre_pcpu), flags); in vmx_pre_block()
10622 vcpu->pre_pcpu), flags); in vmx_pre_block()
10623 list_del(&vcpu->blocked_vcpu_list); in vmx_pre_block()
10626 vcpu->pre_pcpu), flags); in vmx_pre_block()
10627 vcpu->pre_pcpu = -1; in vmx_pre_block()
10644 dest = cpu_physical_id(vcpu->pre_pcpu); in vmx_pre_block()
10659 static void vmx_post_block(struct kvm_vcpu *vcpu) in vmx_post_block() argument
10661 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); in vmx_post_block()
10666 if (!kvm_arch_has_assigned_device(vcpu->kvm) || in vmx_post_block()
10673 dest = cpu_physical_id(vcpu->cpu); in vmx_post_block()
10688 if(vcpu->pre_pcpu != -1) { in vmx_post_block()
10691 vcpu->pre_pcpu), flags); in vmx_post_block()
10692 list_del(&vcpu->blocked_vcpu_list); in vmx_post_block()
10695 vcpu->pre_pcpu), flags); in vmx_post_block()
10696 vcpu->pre_pcpu = -1; in vmx_post_block()
10715 struct kvm_vcpu *vcpu; in vmx_update_pi_irte() local
10744 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) in vmx_update_pi_irte()
10747 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); in vmx_update_pi_irte()
10750 trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi, in vmx_update_pi_irte()
10757 pi_set_sn(vcpu_to_pi_desc(vcpu)); in vmx_update_pi_irte()
10759 pi_clear_sn(vcpu_to_pi_desc(vcpu)); in vmx_update_pi_irte()
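The vmx_pre_block()/vmx_post_block() and vmx_update_pi_irte() lines above manage the VT-d posted-interrupt descriptor: the suppress-notification (SN) bit is raised while the vCPU is not running so that devices stop sending notification events, and the notification vector and destination are rewritten around blocking (the blocked-vCPU list and wakeup handling are not modeled here). A minimal sketch of the descriptor bookkeeping, assuming the usual layout of a 256-bit posted-interrupt request bitmap followed by a control word whose bit 0 is ON and bit 1 is SN; the kernel manipulates these bits with atomic operations, which this single-threaded model omits.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified posted-interrupt descriptor: 256-bit PIR + control word.
 * The real struct pi_desc also carries the notification vector and destination. */
struct pi_desc_model {
    uint32_t pir[8];        /* one bit per vector 0..255 */
    uint64_t control;       /* bit 0: ON (outstanding), bit 1: SN (suppress) */
};

#define PI_ON  (1ull << 0)
#define PI_SN  (1ull << 1)

static void pi_set_sn(struct pi_desc_model *p)   { p->control |=  PI_SN; }
static void pi_clear_sn(struct pi_desc_model *p) { p->control &= ~PI_SN; }

/* Device/IOMMU side: post a vector into the PIR; returns true if a
 * notification event would be sent under the usual "SN clear and ON
 * newly set" rule.  Suppressed vectors stay pending in the PIR. */
static bool pi_post(struct pi_desc_model *p, uint8_t vec)
{
    p->pir[vec / 32] |= 1u << (vec % 32);
    if (p->control & (PI_SN | PI_ON))
        return false;
    p->control |= PI_ON;
    return true;
}

int main(void)
{
    struct pi_desc_model pd = { { 0 }, 0 };

    pi_set_sn(&pd);                       /* vCPU scheduled out */
    printf("notify while SN set:   %d\n", pi_post(&pd, 0x30));
    pi_clear_sn(&pd);                     /* vCPU about to run again */
    printf("notify after SN clear: %d\n", pi_post(&pd, 0x31));
    return 0;
}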