Lines Matching refs:to_vmx

611 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)  in to_vmx()  function
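(Line 611 above is the definition site; the listing shows only the prototype. In vmx.c of this era the helper is a container_of() cast from the embedded struct kvm_vcpu back to its enclosing struct vcpu_vmx, which is why every call site below can reach VMX-private state through the generic vcpu pointer. The stand-alone sketch below illustrates the idiom only; the struct members used here (vcpu_id, rflags) are reduced stand-ins for the real KVM definitions, not the actual layouts.)

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-ins for the real KVM structures (illustration only). */
struct kvm_vcpu { int vcpu_id; };

struct vcpu_vmx {
	struct kvm_vcpu vcpu;   /* embedded generic vCPU, must stay the anchor member */
	unsigned long rflags;   /* one example of VMX-private per-vCPU state */
};

/* Same pointer arithmetic as the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The idiom behind line 611: recover the enclosing vcpu_vmx from the
 * generic kvm_vcpu pointer that the arch-independent KVM code passes in. */
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

int main(void)
{
	struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 }, .rflags = 0x2 };
	struct kvm_vcpu *generic = &vmx.vcpu;

	/* Every reference in the listing performs this cast, either inline
	 * (to_vmx(vcpu)->field) or cached once at the top of the function
	 * as: struct vcpu_vmx *vmx = to_vmx(vcpu); */
	printf("rflags = %#lx\n", to_vmx(generic)->rflags);
	return 0;
}

(All remaining entries in the listing are call sites of that cast.)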
618 return &(to_vmx(vcpu)->pi_desc); in vcpu_to_pi_desc()
839 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
1649 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
1885 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state()
2041 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
2114 __vmx_load_host_state(to_vmx(vcpu)); in vmx_vcpu_put()
2116 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); in vmx_vcpu_put()
2193 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
2195 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
2198 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
2200 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
2206 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
2207 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
2208 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
2266 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in nested_vmx_check_exception()
2276 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
2416 to_vmx(vcpu)->nested.vmcs01_tsc_offset : in vmx_read_l1_tsc()
2439 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; in vmx_write_tsc_offset()
2459 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; in vmx_adjust_tsc_offset_guest()
2684 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr()
2796 vmx_load_host_state(to_vmx(vcpu)); in vmx_get_msr()
2797 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base; in vmx_get_msr()
2822 msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control; in vmx_get_msr()
2838 msr = find_msr_entry(to_vmx(vcpu), msr_info->index); in vmx_get_msr()
2858 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2914 (to_vmx(vcpu)->nested.msr_ia32_feature_control & in vmx_set_msr()
3436 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
3509 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
3556 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
3566 vmx_load_host_state(to_vmx(vcpu)); in vmx_set_efer()
3569 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3572 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3585 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
3600 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
3618 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); in vmx_flush_tlb()
3709 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3792 (to_vmx(vcpu)->rmode.vm86_active ? in vmx_set_cr4()
3805 if (to_vmx(vcpu)->nested.vmxon && in vmx_set_cr4()
3837 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3875 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3879 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3884 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3917 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3957 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
4500 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt()
4539 struct vcpu_vmx *vmx = to_vmx(vcpu); in kvm_vcpu_trigger_posted_interrupt()
4566 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4591 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4609 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
4874 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
5019 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
5046 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
5078 return to_vmx(vcpu)->soft_vnmi_blocked; in vmx_get_nmi_mask()
5079 if (to_vmx(vcpu)->nmi_known_unmasked) in vmx_get_nmi_mask()
5086 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
5106 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
5109 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) in vmx_nmi_allowed()
5119 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
5145 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5223 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception()
5382 if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & in nested_cr0_valid()
5415 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5814 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5905 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5973 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
6051 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
6064 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
6493 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
6643 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr()
6735 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon()
6811 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission()
6896 free_nested(to_vmx(vcpu)); in handle_vmoff()
6905 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear()
7135 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12()
7243 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld()
7305 (void *)&to_vmx(vcpu)->nested.current_vmptr, in handle_vmptrst()
7318 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept()
7384 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid()
7432 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); in handle_invvpid()
7457 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
7703 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled()
7868 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
8076 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
8214 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr()
8347 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr()
8529 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
8726 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01()
8742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
8941 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
9015 to_vmx(vcpu)->nested.nested_vmx_ept_caps & in nested_ept_init_mmu_context()
9049 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in vmx_inject_page_fault_nested()
9059 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages()
9135 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer()
9470 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02()
9734 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); in prepare_vmcs02()
9799 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run()
10071 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events()
10111 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
10196 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in prepare_vmcs12()
10220 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); in prepare_vmcs12()
10427 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit()
10520 free_nested(to_vmx(vcpu)); in vmx_leave_nested()
10539 to_vmx(vcpu)->nested.sync_shadow_vmcs = true; in nested_vmx_entry_failure()