to_vmx 336 arch/x86/kvm/vmx/evmcs.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 352 arch/x86/kvm/vmx/evmcs.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 173 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 220 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 236 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 295 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 320 arch/x86/kvm/vmx/nested.c vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
to_vmx 329 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 352 arch/x86/kvm/vmx/nested.c to_vmx(vcpu)->nested.msrs.ept_caps &
to_vmx 429 arch/x86/kvm/vmx/nested.c !to_vmx(vcpu)->nested.nested_run_pending) {
to_vmx 493 arch/x86/kvm/vmx/nested.c msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
to_vmx 566 arch/x86/kvm/vmx/nested.c unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
to_vmx 567 arch/x86/kvm/vmx/nested.c struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
to_vmx 653 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
to_vmx 680 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 881 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1038 arch/x86/kvm/vmx/nested.c (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
to_vmx 1043 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1218 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1821 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1903 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1940 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2309 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2464 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2504 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2563 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2580 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2802 arch/x86/kvm/vmx/nested.c if (to_vmx(vcpu)->nested.nested_run_pending &&
to_vmx 2825 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2934 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3019 arch/x86/kvm/vmx/nested.c if (!to_vmx(vcpu)->nested.vmxon) {
to_vmx 3056 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3200 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3408 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3465 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3524 arch/x86/kvm/vmx/nested.c hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
to_vmx 3585 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3632 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3660 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3719 arch/x86/kvm/vmx/nested.c (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
to_vmx 3963 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4076 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4085 arch/x86/kvm/vmx/nested.c hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
to_vmx 4356 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4377 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4437 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4499 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4541 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4603 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4691 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4812 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4878 arch/x86/kvm/vmx/nested.c gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
to_vmx 4885 arch/x86/kvm/vmx/nested.c if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
to_vmx 4903 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4960 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5083 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5327 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5525 arch/x86/kvm/vmx/nested.c vmx = to_vmx(vcpu);
to_vmx 5612 arch/x86/kvm/vmx/nested.c to_vmx(vcpu)->nested.nested_run_pending = 0;
to_vmx 5622 arch/x86/kvm/vmx/nested.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 40 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.cached_vmcs12;
to_vmx 45 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
to_vmx 50 arch/x86/kvm/vmx/nested.h struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 119 arch/x86/kvm/vmx/nested.h return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
to_vmx 129 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.misc_low &
to_vmx 135 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
to_vmx 140 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
to_vmx 146 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
to_vmx 262 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
to_vmx 263 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
to_vmx 266 arch/x86/kvm/vmx/nested.h if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
to_vmx 276 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
to_vmx 277 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
to_vmx 284 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
to_vmx 285 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
to_vmx 468 arch/x86/kvm/vmx/vmx.c tmp_eptp = to_vmx(vcpu)->ept_pointer;
to_vmx 469 arch/x86/kvm/vmx/vmx.c } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
to_vmx 491 arch/x86/kvm/vmx/vmx.c u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
to_vmx 519 arch/x86/kvm/vmx/vmx.c if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
to_vmx 551 arch/x86/kvm/vmx/vmx.c evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
to_vmx 769 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->rmode.vm86_active)
to_vmx 796 arch/x86/kvm/vmx/vmx.c msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
to_vmx 1109 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1292 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1367 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1394 arch/x86/kvm/vmx/vmx.c vmx_prepare_switch_to_host(to_vmx(vcpu));
to_vmx 1411 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->rmode.vm86_active) {
to_vmx 1413 arch/x86/kvm/vmx/vmx.c save_rflags = to_vmx(vcpu)->rmode.save_rflags;
to_vmx 1416 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->rflags = rflags;
to_vmx 1418 arch/x86/kvm/vmx/vmx.c return to_vmx(vcpu)->rflags;
to_vmx 1426 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->rflags = rflags;
to_vmx 1427 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->rmode.vm86_active) {
to_vmx 1428 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->rmode.save_rflags = rflags;
to_vmx 1433 arch/x86/kvm/vmx/vmx.c if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
to_vmx 1434 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
to_vmx 1468 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1551 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
to_vmx 1581 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1726 arch/x86/kvm/vmx/vmx.c uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
to_vmx 1752 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 1781 arch/x86/kvm/vmx/vmx.c msr_info->data = to_vmx(vcpu)->spec_ctrl;
to_vmx 1892 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2036 arch/x86/kvm/vmx/vmx.c !(to_vmx(vcpu)->msr_ia32_feature_control &
to_vmx 2044 arch/x86/kvm/vmx/vmx.c (to_vmx(vcpu)->msr_ia32_feature_control &
to_vmx 2646 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2719 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2767 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2775 arch/x86/kvm/vmx/vmx.c vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
to_vmx 2778 arch/x86/kvm/vmx/vmx.c vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
to_vmx 2791 arch/x86/kvm/vmx/vmx.c vmx_segment_cache_clear(to_vmx(vcpu));
to_vmx 2806 arch/x86/kvm/vmx/vmx.c vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
to_vmx 2814 arch/x86/kvm/vmx/vmx.c int vpid = to_vmx(vcpu)->vpid;
to_vmx 2886 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2910 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 2984 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->ept_pointer = eptp;
to_vmx 3006 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3081 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3119 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->rmode.vm86_active) {
to_vmx 3123 arch/x86/kvm/vmx/vmx.c return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
to_vmx 3128 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3160 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3200 arch/x86/kvm/vmx/vmx.c u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
to_vmx 3662 arch/x86/kvm/vmx/vmx.c (secondary_exec_controls_get(to_vmx(vcpu)) &
to_vmx 3699 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3742 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3802 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3828 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 3943 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4256 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4355 arch/x86/kvm/vmx/vmx.c exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
to_vmx 4366 arch/x86/kvm/vmx/vmx.c exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
to_vmx 4371 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4399 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4430 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4444 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4464 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
to_vmx 4468 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
to_vmx 4478 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
to_vmx 4518 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
to_vmx 4596 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 4770 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.vmxon &&
to_vmx 4919 arch/x86/kvm/vmx/vmx.c exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
to_vmx 4963 arch/x86/kvm/vmx/vmx.c exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
to_vmx 4994 arch/x86/kvm/vmx/vmx.c exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
to_vmx 5092 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5161 arch/x86/kvm/vmx/vmx.c if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
to_vmx 5212 arch/x86/kvm/vmx/vmx.c exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
to_vmx 5221 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5275 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5291 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5496 arch/x86/kvm/vmx/vmx.c if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
to_vmx 5511 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5618 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 5846 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6034 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6135 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6192 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6266 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6449 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6485 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6660 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6940 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6979 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 6996 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 7065 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 7073 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
to_vmx 7076 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
to_vmx 7097 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->req_immediate_exit = true;
to_vmx 7212 arch/x86/kvm/vmx/vmx.c vmx = to_vmx(vcpu);
to_vmx 7247 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->hv_deadline_tsc = -1;
to_vmx 7278 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 7546 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
to_vmx 7549 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
to_vmx 7556 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
to_vmx 7563 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 7577 arch/x86/kvm/vmx/vmx.c struct vcpu_vmx *vmx = to_vmx(vcpu);
to_vmx 7607 arch/x86/kvm/vmx/vmx.c return to_vmx(vcpu)->nested.vmxon;
to_vmx 473 arch/x86/kvm/vmx/vmx.h return &(to_vmx(vcpu)->pi_desc);
to_vmx 506 arch/x86/kvm/vmx/vmx.h __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
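
Every hit above funnels through the same trivial helper: to_vmx() converts the generic struct kvm_vcpu pointer that KVM's common code passes around into the VMX-specific struct vcpu_vmx that embeds it. As a minimal sketch (assuming the standard container_of() embedding used throughout KVM, with the embedded member named vcpu, consistent with the to_vmx(vcpu)->vcpu.arch.event_exit_inst_len reference at vmx.c line 4518 above), the helper in arch/x86/kvm/vmx/vmx.h looks roughly like:

    static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    {
            /*
             * struct vcpu_vmx embeds a struct kvm_vcpu as its "vcpu"
             * member; container_of() subtracts that member's offset to
             * recover a pointer to the enclosing VMX-specific structure.
             * No allocation or lookup: this is pure pointer arithmetic.
             */
            return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

The listing also shows the two usage idioms side by side: functions that touch several VMX fields cache the conversion once in a local (struct vcpu_vmx *vmx = to_vmx(vcpu);, by far the most common hit), while one-off accesses such as return &(to_vmx(vcpu)->pi_desc); at vmx.h line 473 call it inline.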