vmx               226 arch/x86/events/intel/pt.c 			pt_pmu.vmx = true;
vmx              1395 arch/x86/events/intel/pt.c 	if (pt_pmu.vmx)
vmx                46 arch/x86/events/intel/pt.h 	bool			vmx;
vmx               420 arch/x86/include/uapi/asm/kvm.h 		struct kvm_vmx_nested_state_hdr vmx;
vmx               432 arch/x86/include/uapi/asm/kvm.h 		struct kvm_vmx_nested_state_data vmx[0];
vmx               336 arch/x86/kvm/vmx/evmcs.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               343 arch/x86/kvm/vmx/evmcs.c 	if (vmx->nested.enlightened_vmcs_enabled)
vmx               352 arch/x86/kvm/vmx/evmcs.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               353 arch/x86/kvm/vmx/evmcs.c 	bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
vmx               355 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.enlightened_vmcs_enabled = true;
vmx               364 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
vmx               365 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
vmx               366 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
vmx               367 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
vmx               368 arch/x86/kvm/vmx/evmcs.c 	vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
vmx               173 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               179 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
vmx               211 arch/x86/kvm/vmx/nested.c static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
vmx               213 arch/x86/kvm/vmx/nested.c 	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
vmx               215 arch/x86/kvm/vmx/nested.c 	vmx->nested.need_vmcs12_to_shadow_sync = false;
vmx               220 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               222 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.hv_evmcs)
vmx               225 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vmx               226 arch/x86/kvm/vmx/nested.c 	vmx->nested.hv_evmcs_vmptr = 0;
vmx               227 arch/x86/kvm/vmx/nested.c 	vmx->nested.hv_evmcs = NULL;
vmx               236 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               238 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
vmx               243 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmxon = false;
vmx               244 arch/x86/kvm/vmx/nested.c 	vmx->nested.smm.vmxon = false;
vmx               245 arch/x86/kvm/vmx/nested.c 	free_vpid(vmx->nested.vpid02);
vmx               246 arch/x86/kvm/vmx/nested.c 	vmx->nested.posted_intr_nv = -1;
vmx               247 arch/x86/kvm/vmx/nested.c 	vmx->nested.current_vmptr = -1ull;
vmx               249 arch/x86/kvm/vmx/nested.c 		vmx_disable_shadow_vmcs(vmx);
vmx               250 arch/x86/kvm/vmx/nested.c 		vmcs_clear(vmx->vmcs01.shadow_vmcs);
vmx               251 arch/x86/kvm/vmx/nested.c 		free_vmcs(vmx->vmcs01.shadow_vmcs);
vmx               252 arch/x86/kvm/vmx/nested.c 		vmx->vmcs01.shadow_vmcs = NULL;
vmx               254 arch/x86/kvm/vmx/nested.c 	kfree(vmx->nested.cached_vmcs12);
vmx               255 arch/x86/kvm/vmx/nested.c 	vmx->nested.cached_vmcs12 = NULL;
vmx               256 arch/x86/kvm/vmx/nested.c 	kfree(vmx->nested.cached_shadow_vmcs12);
vmx               257 arch/x86/kvm/vmx/nested.c 	vmx->nested.cached_shadow_vmcs12 = NULL;
vmx               259 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.apic_access_page) {
vmx               260 arch/x86/kvm/vmx/nested.c 		kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx               261 arch/x86/kvm/vmx/nested.c 		vmx->nested.apic_access_page = NULL;
vmx               263 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
vmx               264 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
vmx               265 arch/x86/kvm/vmx/nested.c 	vmx->nested.pi_desc = NULL;
vmx               271 arch/x86/kvm/vmx/nested.c 	free_loaded_vmcs(&vmx->nested.vmcs02);
vmx               274 arch/x86/kvm/vmx/nested.c static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
vmx               279 arch/x86/kvm/vmx/nested.c 	if (unlikely(!vmx->guest_state_loaded))
vmx               283 arch/x86/kvm/vmx/nested.c 	dest = &vmx->loaded_vmcs->host_state;
vmx               295 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               299 arch/x86/kvm/vmx/nested.c 	if (vmx->loaded_vmcs == vmcs)
vmx               303 arch/x86/kvm/vmx/nested.c 	prev = vmx->loaded_vmcs;
vmx               304 arch/x86/kvm/vmx/nested.c 	vmx->loaded_vmcs = vmcs;
vmx               306 arch/x86/kvm/vmx/nested.c 	vmx_sync_vmcs_host_state(vmx, prev);
vmx               309 arch/x86/kvm/vmx/nested.c 	vmx_segment_cache_clear(vmx);
vmx               329 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               333 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.pml_full) {
vmx               335 arch/x86/kvm/vmx/nested.c 		vmx->nested.pml_full = false;
vmx               680 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               686 arch/x86/kvm/vmx/nested.c 	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
vmx               881 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx               882 arch/x86/kvm/vmx/nested.c 	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
vmx               883 arch/x86/kvm/vmx/nested.c 				       vmx->nested.msrs.misc_high);
vmx              1043 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1045 arch/x86/kvm/vmx/nested.c 	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
vmx              1056 arch/x86/kvm/vmx/nested.c static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
vmx              1063 arch/x86/kvm/vmx/nested.c 	u64 vmx_basic = vmx->nested.msrs.basic;
vmx              1082 arch/x86/kvm/vmx/nested.c 	vmx->nested.msrs.basic = data;
vmx              1087 arch/x86/kvm/vmx/nested.c vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
vmx              1094 arch/x86/kvm/vmx/nested.c 		lowp = &vmx->nested.msrs.pinbased_ctls_low;
vmx              1095 arch/x86/kvm/vmx/nested.c 		highp = &vmx->nested.msrs.pinbased_ctls_high;
vmx              1098 arch/x86/kvm/vmx/nested.c 		lowp = &vmx->nested.msrs.procbased_ctls_low;
vmx              1099 arch/x86/kvm/vmx/nested.c 		highp = &vmx->nested.msrs.procbased_ctls_high;
vmx              1102 arch/x86/kvm/vmx/nested.c 		lowp = &vmx->nested.msrs.exit_ctls_low;
vmx              1103 arch/x86/kvm/vmx/nested.c 		highp = &vmx->nested.msrs.exit_ctls_high;
vmx              1106 arch/x86/kvm/vmx/nested.c 		lowp = &vmx->nested.msrs.entry_ctls_low;
vmx              1107 arch/x86/kvm/vmx/nested.c 		highp = &vmx->nested.msrs.entry_ctls_high;
vmx              1110 arch/x86/kvm/vmx/nested.c 		lowp = &vmx->nested.msrs.secondary_ctls_low;
vmx              1111 arch/x86/kvm/vmx/nested.c 		highp = &vmx->nested.msrs.secondary_ctls_high;
vmx              1132 arch/x86/kvm/vmx/nested.c static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
vmx              1142 arch/x86/kvm/vmx/nested.c 	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
vmx              1143 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.misc_high);
vmx              1148 arch/x86/kvm/vmx/nested.c 	if ((vmx->nested.msrs.pinbased_ctls_high &
vmx              1163 arch/x86/kvm/vmx/nested.c 	vmx->nested.msrs.misc_low = data;
vmx              1164 arch/x86/kvm/vmx/nested.c 	vmx->nested.msrs.misc_high = data >> 32;
vmx              1169 arch/x86/kvm/vmx/nested.c static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
vmx              1173 arch/x86/kvm/vmx/nested.c 	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
vmx              1174 arch/x86/kvm/vmx/nested.c 					   vmx->nested.msrs.vpid_caps);
vmx              1180 arch/x86/kvm/vmx/nested.c 	vmx->nested.msrs.ept_caps = data;
vmx              1181 arch/x86/kvm/vmx/nested.c 	vmx->nested.msrs.vpid_caps = data >> 32;
vmx              1185 arch/x86/kvm/vmx/nested.c static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
vmx              1191 arch/x86/kvm/vmx/nested.c 		msr = &vmx->nested.msrs.cr0_fixed0;
vmx              1194 arch/x86/kvm/vmx/nested.c 		msr = &vmx->nested.msrs.cr4_fixed0;
vmx              1218 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1224 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.vmxon)
vmx              1229 arch/x86/kvm/vmx/nested.c 		return vmx_restore_vmx_basic(vmx, data);
vmx              1249 arch/x86/kvm/vmx/nested.c 		return vmx_restore_control_msr(vmx, msr_index, data);
vmx              1251 arch/x86/kvm/vmx/nested.c 		return vmx_restore_vmx_misc(vmx, data);
vmx              1254 arch/x86/kvm/vmx/nested.c 		return vmx_restore_fixed0_msr(vmx, msr_index, data);
vmx              1263 arch/x86/kvm/vmx/nested.c 		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
vmx              1265 arch/x86/kvm/vmx/nested.c 		vmx->nested.msrs.vmcs_enum = data;
vmx              1268 arch/x86/kvm/vmx/nested.c 		if (data & ~vmx->nested.msrs.vmfunc_controls)
vmx              1270 arch/x86/kvm/vmx/nested.c 		vmx->nested.msrs.vmfunc_controls = data;
vmx              1366 arch/x86/kvm/vmx/nested.c static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
vmx              1368 arch/x86/kvm/vmx/nested.c 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
vmx              1369 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
vmx              1388 arch/x86/kvm/vmx/nested.c 	vmcs_load(vmx->loaded_vmcs->vmcs);
vmx              1393 arch/x86/kvm/vmx/nested.c static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmx              1403 arch/x86/kvm/vmx/nested.c 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
vmx              1404 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
vmx              1424 arch/x86/kvm/vmx/nested.c 	vmcs_load(vmx->loaded_vmcs->vmcs);
vmx              1427 arch/x86/kvm/vmx/nested.c static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmx              1429 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
vmx              1430 arch/x86/kvm/vmx/nested.c 	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
vmx              1647 arch/x86/kvm/vmx/nested.c static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
vmx              1649 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
vmx              1650 arch/x86/kvm/vmx/nested.c 	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
vmx              1821 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1825 arch/x86/kvm/vmx/nested.c 	if (likely(!vmx->nested.enlightened_vmcs_enabled))
vmx              1831 arch/x86/kvm/vmx/nested.c 	if (unlikely(!vmx->nested.hv_evmcs ||
vmx              1832 arch/x86/kvm/vmx/nested.c 		     evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
vmx              1833 arch/x86/kvm/vmx/nested.c 		if (!vmx->nested.hv_evmcs)
vmx              1834 arch/x86/kvm/vmx/nested.c 			vmx->nested.current_vmptr = -1ull;
vmx              1839 arch/x86/kvm/vmx/nested.c 				 &vmx->nested.hv_evmcs_map))
vmx              1842 arch/x86/kvm/vmx/nested.c 		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
vmx              1866 arch/x86/kvm/vmx/nested.c 		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
vmx              1867 arch/x86/kvm/vmx/nested.c 		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
vmx              1872 arch/x86/kvm/vmx/nested.c 		vmx->nested.dirty_vmcs12 = true;
vmx              1873 arch/x86/kvm/vmx/nested.c 		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
vmx              1895 arch/x86/kvm/vmx/nested.c 		vmx->nested.hv_evmcs->hv_clean_fields &=
vmx              1903 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1910 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
vmx              1913 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.hv_evmcs) {
vmx              1914 arch/x86/kvm/vmx/nested.c 		copy_vmcs12_to_enlightened(vmx);
vmx              1916 arch/x86/kvm/vmx/nested.c 		vmx->nested.hv_evmcs->hv_clean_fields |=
vmx              1919 arch/x86/kvm/vmx/nested.c 		copy_vmcs12_to_shadow(vmx);
vmx              1922 arch/x86/kvm/vmx/nested.c 	vmx->nested.need_vmcs12_to_shadow_sync = false;
vmx              1927 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx =
vmx              1930 arch/x86/kvm/vmx/nested.c 	vmx->nested.preemption_timer_expired = true;
vmx              1931 arch/x86/kvm/vmx/nested.c 	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
vmx              1932 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_kick(&vmx->vcpu);
vmx              1940 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1947 arch/x86/kvm/vmx/nested.c 		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
vmx              1957 arch/x86/kvm/vmx/nested.c 	hrtimer_start(&vmx->nested.preemption_timer,
vmx              1961 arch/x86/kvm/vmx/nested.c static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmx              1963 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.nested_run_pending &&
vmx              1967 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
vmx              1969 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
vmx              1972 arch/x86/kvm/vmx/nested.c static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
vmx              1980 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.vmcs02_initialized)
vmx              1982 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmcs02_initialized = true;
vmx              1990 arch/x86/kvm/vmx/nested.c 		vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
vmx              2000 arch/x86/kvm/vmx/nested.c 		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
vmx              2010 arch/x86/kvm/vmx/nested.c 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmx              2023 arch/x86/kvm/vmx/nested.c 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmx              2024 arch/x86/kvm/vmx/nested.c 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
vmx              2026 arch/x86/kvm/vmx/nested.c 	vmx_set_constant_host_state(vmx);
vmx              2029 arch/x86/kvm/vmx/nested.c static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
vmx              2032 arch/x86/kvm/vmx/nested.c 	prepare_vmcs02_constant_state(vmx);
vmx              2037 arch/x86/kvm/vmx/nested.c 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
vmx              2038 arch/x86/kvm/vmx/nested.c 			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
vmx              2040 arch/x86/kvm/vmx/nested.c 			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx              2044 arch/x86/kvm/vmx/nested.c static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmx              2047 arch/x86/kvm/vmx/nested.c 	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
vmx              2049 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
vmx              2050 arch/x86/kvm/vmx/nested.c 		prepare_vmcs02_early_rare(vmx, vmcs12);
vmx              2055 arch/x86/kvm/vmx/nested.c 	exec_control = vmx_pin_based_exec_ctrl(vmx);
vmx              2061 arch/x86/kvm/vmx/nested.c 		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx              2062 arch/x86/kvm/vmx/nested.c 		vmx->nested.pi_pending = false;
vmx              2066 arch/x86/kvm/vmx/nested.c 	pin_controls_set(vmx, exec_control);
vmx              2071 arch/x86/kvm/vmx/nested.c 	exec_control = vmx_exec_control(vmx); /* L0's desires */
vmx              2099 arch/x86/kvm/vmx/nested.c 	exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
vmx              2101 arch/x86/kvm/vmx/nested.c 	exec_controls_set(vmx, exec_control);
vmx              2107 arch/x86/kvm/vmx/nested.c 		exec_control = vmx->secondary_exec_control;
vmx              2140 arch/x86/kvm/vmx/nested.c 		secondary_exec_controls_set(vmx, exec_control);
vmx              2159 arch/x86/kvm/vmx/nested.c 	vm_entry_controls_set(vmx, exec_control);
vmx              2171 arch/x86/kvm/vmx/nested.c 	vm_exit_controls_set(vmx, exec_control);
vmx              2176 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.nested_run_pending) {
vmx              2185 arch/x86/kvm/vmx/nested.c 		vmx->loaded_vmcs->nmi_known_unmasked =
vmx              2192 arch/x86/kvm/vmx/nested.c static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmx              2194 arch/x86/kvm/vmx/nested.c 	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
vmx              2255 arch/x86/kvm/vmx/nested.c 		if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
vmx              2289 arch/x86/kvm/vmx/nested.c 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmx              2290 arch/x86/kvm/vmx/nested.c 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmx              2292 arch/x86/kvm/vmx/nested.c 	set_cr4_guest_host_mask(vmx);
vmx              2309 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2310 arch/x86/kvm/vmx/nested.c 	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
vmx              2313 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
vmx              2314 arch/x86/kvm/vmx/nested.c 		prepare_vmcs02_rare(vmx, vmcs12);
vmx              2315 arch/x86/kvm/vmx/nested.c 		vmx->nested.dirty_vmcs12 = false;
vmx              2322 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.nested_run_pending &&
vmx              2328 arch/x86/kvm/vmx/nested.c 		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
vmx              2330 arch/x86/kvm/vmx/nested.c 	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
vmx              2332 arch/x86/kvm/vmx/nested.c 		vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
vmx              2343 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.nested_run_pending &&
vmx              2348 arch/x86/kvm/vmx/nested.c 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
vmx              2354 arch/x86/kvm/vmx/nested.c 		decache_tsc_multiplier(vmx);
vmx              2366 arch/x86/kvm/vmx/nested.c 			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx              2367 arch/x86/kvm/vmx/nested.c 				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
vmx              2403 arch/x86/kvm/vmx/nested.c 	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
vmx              2412 arch/x86/kvm/vmx/nested.c 	if (vmx->emulation_required) {
vmx              2464 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2470 arch/x86/kvm/vmx/nested.c 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
vmx              2474 arch/x86/kvm/vmx/nested.c 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
vmx              2491 arch/x86/kvm/vmx/nested.c 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
vmx              2504 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2507 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.pinbased_ctls_low,
vmx              2508 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.pinbased_ctls_high)) ||
vmx              2510 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.procbased_ctls_low,
vmx              2511 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.procbased_ctls_high)))
vmx              2516 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.secondary_ctls_low,
vmx              2517 arch/x86/kvm/vmx/nested.c 				   vmx->nested.msrs.secondary_ctls_high)))
vmx              2544 arch/x86/kvm/vmx/nested.c 		       ~vmx->nested.msrs.vmfunc_controls))
vmx              2563 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2566 arch/x86/kvm/vmx/nested.c 				    vmx->nested.msrs.exit_ctls_low,
vmx              2567 arch/x86/kvm/vmx/nested.c 				    vmx->nested.msrs.exit_ctls_high)) ||
vmx              2580 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2583 arch/x86/kvm/vmx/nested.c 				    vmx->nested.msrs.entry_ctls_low,
vmx              2584 arch/x86/kvm/vmx/nested.c 				    vmx->nested.msrs.entry_ctls_high)))
vmx              2825 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2832 arch/x86/kvm/vmx/nested.c 	if (vmx->msr_autoload.host.nr)
vmx              2834 arch/x86/kvm/vmx/nested.c 	if (vmx->msr_autoload.guest.nr)
vmx              2850 arch/x86/kvm/vmx/nested.c 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
vmx              2852 arch/x86/kvm/vmx/nested.c 		vmx->loaded_vmcs->host_state.cr3 = cr3;
vmx              2856 arch/x86/kvm/vmx/nested.c 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmx              2858 arch/x86/kvm/vmx/nested.c 		vmx->loaded_vmcs->host_state.cr4 = cr4;
vmx              2884 arch/x86/kvm/vmx/nested.c 		[loaded_vmcs]"r"(vmx->loaded_vmcs),
vmx              2891 arch/x86/kvm/vmx/nested.c 	if (vmx->msr_autoload.host.nr)
vmx              2892 arch/x86/kvm/vmx/nested.c 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmx              2893 arch/x86/kvm/vmx/nested.c 	if (vmx->msr_autoload.guest.nr)
vmx              2894 arch/x86/kvm/vmx/nested.c 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmx              2934 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2946 arch/x86/kvm/vmx/nested.c 		if (vmx->nested.apic_access_page) { /* shouldn't happen */
vmx              2947 arch/x86/kvm/vmx/nested.c 			kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx              2948 arch/x86/kvm/vmx/nested.c 			vmx->nested.apic_access_page = NULL;
vmx              2952 arch/x86/kvm/vmx/nested.c 			vmx->nested.apic_access_page = page;
vmx              2953 arch/x86/kvm/vmx/nested.c 			hpa = page_to_phys(vmx->nested.apic_access_page);
vmx              2967 arch/x86/kvm/vmx/nested.c 		map = &vmx->nested.virtual_apic_map;
vmx              2982 arch/x86/kvm/vmx/nested.c 			exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
vmx              2993 arch/x86/kvm/vmx/nested.c 		map = &vmx->nested.pi_desc_map;
vmx              2996 arch/x86/kvm/vmx/nested.c 			vmx->nested.pi_desc =
vmx              3004 arch/x86/kvm/vmx/nested.c 		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
vmx              3006 arch/x86/kvm/vmx/nested.c 		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
vmx              3056 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3062 arch/x86/kvm/vmx/nested.c 	evaluate_pending_interrupts = exec_controls_get(vmx) &
vmx              3068 arch/x86/kvm/vmx/nested.c 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
vmx              3071 arch/x86/kvm/vmx/nested.c 		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx              3092 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx              3094 arch/x86/kvm/vmx/nested.c 	prepare_vmcs02_early(vmx, vmcs12);
vmx              3101 arch/x86/kvm/vmx/nested.c 			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vmx              3156 arch/x86/kvm/vmx/nested.c 	vmx->nested.preemption_timer_expired = false;
vmx              3179 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vmx              3187 arch/x86/kvm/vmx/nested.c 	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
vmx              3188 arch/x86/kvm/vmx/nested.c 		vmx->nested.need_vmcs12_to_shadow_sync = true;
vmx              3200 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3209 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
vmx              3223 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.hv_evmcs) {
vmx              3224 arch/x86/kvm/vmx/nested.c 		copy_enlightened_to_vmcs12(vmx);
vmx              3228 arch/x86/kvm/vmx/nested.c 		copy_shadow_to_vmcs12(vmx);
vmx              3260 arch/x86/kvm/vmx/nested.c 	vmx->nested.nested_run_pending = 1;
vmx              3266 arch/x86/kvm/vmx/nested.c 	vmx->vcpu.arch.l1tf_flush_l1d = true;
vmx              3290 arch/x86/kvm/vmx/nested.c 		vmx->nested.nested_run_pending = 0;
vmx              3296 arch/x86/kvm/vmx/nested.c 	vmx->nested.nested_run_pending = 0;
vmx              3408 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3413 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
vmx              3416 arch/x86/kvm/vmx/nested.c 	vmx->nested.pi_pending = false;
vmx              3417 arch/x86/kvm/vmx/nested.c 	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
vmx              3420 arch/x86/kvm/vmx/nested.c 	max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
vmx              3422 arch/x86/kvm/vmx/nested.c 		vapic_page = vmx->nested.virtual_apic_map.hva;
vmx              3426 arch/x86/kvm/vmx/nested.c 		__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vmx              3465 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3468 arch/x86/kvm/vmx/nested.c 	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
vmx              3488 arch/x86/kvm/vmx/nested.c 	    vmx->nested.preemption_timer_expired) {
vmx              3585 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3626 arch/x86/kvm/vmx/nested.c 	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
vmx              3632 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3635 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
vmx              3639 arch/x86/kvm/vmx/nested.c 	WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
vmx              3642 arch/x86/kvm/vmx/nested.c 	vmx->loaded_vmcs = &vmx->nested.vmcs02;
vmx              3643 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vmx              3647 arch/x86/kvm/vmx/nested.c 	vmx->loaded_vmcs = &vmx->vmcs01;
vmx              3648 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vmx              3660 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3662 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.hv_evmcs)
vmx              3665 arch/x86/kvm/vmx/nested.c 	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
vmx              3937 arch/x86/kvm/vmx/nested.c static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
vmx              3942 arch/x86/kvm/vmx/nested.c 	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
vmx              3948 arch/x86/kvm/vmx/nested.c 	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
vmx              3949 arch/x86/kvm/vmx/nested.c 		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
vmx              3950 arch/x86/kvm/vmx/nested.c 			return vmx->msr_autoload.guest.val[i].value;
vmx              3953 arch/x86/kvm/vmx/nested.c 	efer_msr = find_msr_entry(vmx, MSR_EFER);
vmx              3963 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3987 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
vmx              4076 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4080 arch/x86/kvm/vmx/nested.c 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
vmx              4090 arch/x86/kvm/vmx/nested.c 	if (likely(!vmx->fail)) {
vmx              4119 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vmx              4122 arch/x86/kvm/vmx/nested.c 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmx              4123 arch/x86/kvm/vmx/nested.c 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmx              4127 arch/x86/kvm/vmx/nested.c 		decache_tsc_multiplier(vmx);
vmx              4129 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx              4130 arch/x86/kvm/vmx/nested.c 		vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx              4139 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.apic_access_page) {
vmx              4140 arch/x86/kvm/vmx/nested.c 		kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx              4141 arch/x86/kvm/vmx/nested.c 		vmx->nested.apic_access_page = NULL;
vmx              4143 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
vmx              4144 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
vmx              4145 arch/x86/kvm/vmx/nested.c 	vmx->nested.pi_desc = NULL;
vmx              4153 arch/x86/kvm/vmx/nested.c 	if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
vmx              4154 arch/x86/kvm/vmx/nested.c 		vmx->nested.need_vmcs12_to_shadow_sync = true;
vmx              4159 arch/x86/kvm/vmx/nested.c 	if (likely(!vmx->fail)) {
vmx              4198 arch/x86/kvm/vmx/nested.c 	vmx->fail = 0;
vmx              4356 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4357 arch/x86/kvm/vmx/nested.c 	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
vmx              4365 arch/x86/kvm/vmx/nested.c 	WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
vmx              4377 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4380 arch/x86/kvm/vmx/nested.c 	r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
vmx              4384 arch/x86/kvm/vmx/nested.c 	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
vmx              4385 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.cached_vmcs12)
vmx              4388 arch/x86/kvm/vmx/nested.c 	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
vmx              4389 arch/x86/kvm/vmx/nested.c 	if (!vmx->nested.cached_shadow_vmcs12)
vmx              4395 arch/x86/kvm/vmx/nested.c 	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
vmx              4397 arch/x86/kvm/vmx/nested.c 	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
vmx              4399 arch/x86/kvm/vmx/nested.c 	vmx->nested.vpid02 = allocate_vpid();
vmx              4401 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmcs02_initialized = false;
vmx              4402 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmxon = true;
vmx              4405 arch/x86/kvm/vmx/nested.c 		vmx->pt_desc.guest.ctl = 0;
vmx              4406 arch/x86/kvm/vmx/nested.c 		pt_update_intercept_for_msr(vmx);
vmx              4412 arch/x86/kvm/vmx/nested.c 	kfree(vmx->nested.cached_shadow_vmcs12);
vmx              4415 arch/x86/kvm/vmx/nested.c 	kfree(vmx->nested.cached_vmcs12);
vmx              4418 arch/x86/kvm/vmx/nested.c 	free_loaded_vmcs(&vmx->nested.vmcs02);
vmx              4437 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4461 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.vmxon)
vmx              4465 arch/x86/kvm/vmx/nested.c 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
vmx              4489 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmxon_ptr = vmptr;
vmx              4499 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4501 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.current_vmptr == -1ull)
vmx              4509 arch/x86/kvm/vmx/nested.c 		copy_shadow_to_vmcs12(vmx);
vmx              4510 arch/x86/kvm/vmx/nested.c 		vmx_disable_shadow_vmcs(vmx);
vmx              4512 arch/x86/kvm/vmx/nested.c 	vmx->nested.posted_intr_nv = -1;
vmx              4516 arch/x86/kvm/vmx/nested.c 				  vmx->nested.current_vmptr >> PAGE_SHIFT,
vmx              4517 arch/x86/kvm/vmx/nested.c 				  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
vmx              4521 arch/x86/kvm/vmx/nested.c 	vmx->nested.current_vmptr = -1ull;
vmx              4541 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4556 arch/x86/kvm/vmx/nested.c 	if (vmptr == vmx->nested.vmxon_ptr)
vmx              4570 arch/x86/kvm/vmx/nested.c 	if (likely(!vmx->nested.enlightened_vmcs_enabled ||
vmx              4572 arch/x86/kvm/vmx/nested.c 		if (vmptr == vmx->nested.current_vmptr)
vmx              4603 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4620 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.current_vmptr == -1ull ||
vmx              4691 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4714 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.current_vmptr == -1ull ||
vmx              4783 arch/x86/kvm/vmx/nested.c 			vmcs_load(vmx->vmcs01.shadow_vmcs);
vmx              4787 arch/x86/kvm/vmx/nested.c 			vmcs_clear(vmx->vmcs01.shadow_vmcs);
vmx              4788 arch/x86/kvm/vmx/nested.c 			vmcs_load(vmx->loaded_vmcs->vmcs);
vmx              4791 arch/x86/kvm/vmx/nested.c 		vmx->nested.dirty_vmcs12 = true;
vmx              4797 arch/x86/kvm/vmx/nested.c static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
vmx              4799 arch/x86/kvm/vmx/nested.c 	vmx->nested.current_vmptr = vmptr;
vmx              4801 arch/x86/kvm/vmx/nested.c 		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
vmx              4803 arch/x86/kvm/vmx/nested.c 			     __pa(vmx->vmcs01.shadow_vmcs));
vmx              4804 arch/x86/kvm/vmx/nested.c 		vmx->nested.need_vmcs12_to_shadow_sync = true;
vmx              4806 arch/x86/kvm/vmx/nested.c 	vmx->nested.dirty_vmcs12 = true;
vmx              4812 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4825 arch/x86/kvm/vmx/nested.c 	if (vmptr == vmx->nested.vmxon_ptr)
vmx              4830 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.hv_evmcs)
vmx              4833 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.current_vmptr != vmptr) {
vmx              4864 arch/x86/kvm/vmx/nested.c 		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
vmx              4867 arch/x86/kvm/vmx/nested.c 		set_current_vmptr(vmx, vmptr);
vmx              4903 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4912 arch/x86/kvm/vmx/nested.c 	if (!(vmx->nested.msrs.secondary_ctls_high &
vmx              4914 arch/x86/kvm/vmx/nested.c 	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
vmx              4925 arch/x86/kvm/vmx/nested.c 	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
vmx              4960 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4971 arch/x86/kvm/vmx/nested.c 	if (!(vmx->nested.msrs.secondary_ctls_high &
vmx              4973 arch/x86/kvm/vmx/nested.c 			!(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
vmx              4984 arch/x86/kvm/vmx/nested.c 	types = (vmx->nested.msrs.vpid_caps &
vmx              5083 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5112 arch/x86/kvm/vmx/nested.c 	nested_vmx_vmexit(vcpu, vmx->exit_reason,
vmx              5327 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5330 arch/x86/kvm/vmx/nested.c 	if (vmx->nested.nested_run_pending)
vmx              5333 arch/x86/kvm/vmx/nested.c 	if (unlikely(vmx->fail)) {
vmx              5355 arch/x86/kvm/vmx/nested.c 				vmx->idt_vectoring_info,
vmx              5365 arch/x86/kvm/vmx/nested.c 			return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
vmx              5510 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx;
vmx              5516 arch/x86/kvm/vmx/nested.c 		.hdr.vmx.vmxon_pa = -1ull,
vmx              5517 arch/x86/kvm/vmx/nested.c 		.hdr.vmx.vmcs12_pa = -1ull,
vmx              5520 arch/x86/kvm/vmx/nested.c 		&user_kvm_nested_state->data.vmx[0];
vmx              5525 arch/x86/kvm/vmx/nested.c 	vmx = to_vmx(vcpu);
vmx              5529 arch/x86/kvm/vmx/nested.c 	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
vmx              5530 arch/x86/kvm/vmx/nested.c 		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
vmx              5531 arch/x86/kvm/vmx/nested.c 		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
vmx              5536 arch/x86/kvm/vmx/nested.c 			if (vmx->nested.hv_evmcs)
vmx              5545 arch/x86/kvm/vmx/nested.c 		if (vmx->nested.smm.vmxon)
vmx              5546 arch/x86/kvm/vmx/nested.c 			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
vmx              5548 arch/x86/kvm/vmx/nested.c 		if (vmx->nested.smm.guest_mode)
vmx              5549 arch/x86/kvm/vmx/nested.c 			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
vmx              5554 arch/x86/kvm/vmx/nested.c 			if (vmx->nested.nested_run_pending)
vmx              5578 arch/x86/kvm/vmx/nested.c 	} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
vmx              5579 arch/x86/kvm/vmx/nested.c 		if (vmx->nested.hv_evmcs)
vmx              5580 arch/x86/kvm/vmx/nested.c 			copy_enlightened_to_vmcs12(vmx);
vmx              5582 arch/x86/kvm/vmx/nested.c 			copy_shadow_to_vmcs12(vmx);
vmx              5622 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5626 arch/x86/kvm/vmx/nested.c 		&user_kvm_nested_state->data.vmx[0];
vmx              5632 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
vmx              5633 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.smm.flags)
vmx              5636 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
vmx              5654 arch/x86/kvm/vmx/nested.c 		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
vmx              5658 arch/x86/kvm/vmx/nested.c 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
vmx              5662 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.smm.flags &
vmx              5674 arch/x86/kvm/vmx/nested.c 		: kvm_state->hdr.vmx.smm.flags)
vmx              5677 arch/x86/kvm/vmx/nested.c 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
vmx              5678 arch/x86/kvm/vmx/nested.c 	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
vmx              5682 arch/x86/kvm/vmx/nested.c 		(!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
vmx              5687 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
vmx              5690 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
vmx              5699 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
vmx              5700 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
vmx              5701 arch/x86/kvm/vmx/nested.c 		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
vmx              5704 arch/x86/kvm/vmx/nested.c 		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
vmx              5710 arch/x86/kvm/vmx/nested.c 		vmx->nested.need_vmcs12_to_shadow_sync = true;
vmx              5715 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
vmx              5716 arch/x86/kvm/vmx/nested.c 		vmx->nested.smm.vmxon = true;
vmx              5717 arch/x86/kvm/vmx/nested.c 		vmx->nested.vmxon = false;
vmx              5719 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
vmx              5720 arch/x86/kvm/vmx/nested.c 			vmx->nested.smm.guest_mode = true;
vmx              5733 arch/x86/kvm/vmx/nested.c 	vmx->nested.nested_run_pending =
vmx              5763 arch/x86/kvm/vmx/nested.c 	vmx->nested.dirty_vmcs12 = true;
vmx              5771 arch/x86/kvm/vmx/nested.c 	vmx->nested.nested_run_pending = 0;
vmx                50 arch/x86/kvm/vmx/nested.h 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx                58 arch/x86/kvm/vmx/nested.h 	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
vmx                59 arch/x86/kvm/vmx/nested.h 		vmx->nested.hv_evmcs;
vmx               621 arch/x86/kvm/vmx/vmx.c static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
vmx               625 arch/x86/kvm/vmx/vmx.c 	for (i = 0; i < vmx->nmsrs; ++i)
vmx               626 arch/x86/kvm/vmx/vmx.c 		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
vmx               631 arch/x86/kvm/vmx/vmx.c struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
vmx               635 arch/x86/kvm/vmx/vmx.c 	i = __find_msr_index(vmx, msr);
vmx               637 arch/x86/kvm/vmx/vmx.c 		return &vmx->guest_msrs[i];
vmx               700 arch/x86/kvm/vmx/vmx.c static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
vmx               706 arch/x86/kvm/vmx/vmx.c 	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
vmx               707 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
vmx               708 arch/x86/kvm/vmx/vmx.c 		vmx->segment_cache.bitmask = 0;
vmx               710 arch/x86/kvm/vmx/vmx.c 	ret = vmx->segment_cache.bitmask & mask;
vmx               711 arch/x86/kvm/vmx/vmx.c 	vmx->segment_cache.bitmask |= mask;
vmx               715 arch/x86/kvm/vmx/vmx.c static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
vmx               717 arch/x86/kvm/vmx/vmx.c 	u16 *p = &vmx->segment_cache.seg[seg].selector;
vmx               719 arch/x86/kvm/vmx/vmx.c 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
vmx               724 arch/x86/kvm/vmx/vmx.c static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
vmx               726 arch/x86/kvm/vmx/vmx.c 	ulong *p = &vmx->segment_cache.seg[seg].base;
vmx               728 arch/x86/kvm/vmx/vmx.c 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
vmx               733 arch/x86/kvm/vmx/vmx.c static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
vmx               735 arch/x86/kvm/vmx/vmx.c 	u32 *p = &vmx->segment_cache.seg[seg].limit;
vmx               737 arch/x86/kvm/vmx/vmx.c 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
vmx               742 arch/x86/kvm/vmx/vmx.c static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
vmx               744 arch/x86/kvm/vmx/vmx.c 	u32 *p = &vmx->segment_cache.seg[seg].ar;
vmx               746 arch/x86/kvm/vmx/vmx.c 	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
vmx               808 arch/x86/kvm/vmx/vmx.c static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
vmx               811 arch/x86/kvm/vmx/vmx.c 	vm_entry_controls_clearbit(vmx, entry);
vmx               812 arch/x86/kvm/vmx/vmx.c 	vm_exit_controls_clearbit(vmx, exit);
vmx               826 arch/x86/kvm/vmx/vmx.c static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
vmx               829 arch/x86/kvm/vmx/vmx.c 	struct msr_autoload *m = &vmx->msr_autoload;
vmx               834 arch/x86/kvm/vmx/vmx.c 			clear_atomic_switch_msr_special(vmx,
vmx               842 arch/x86/kvm/vmx/vmx.c 			clear_atomic_switch_msr_special(vmx,
vmx               866 arch/x86/kvm/vmx/vmx.c static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
vmx               874 arch/x86/kvm/vmx/vmx.c 	vm_entry_controls_setbit(vmx, entry);
vmx               875 arch/x86/kvm/vmx/vmx.c 	vm_exit_controls_setbit(vmx, exit);
vmx               878 arch/x86/kvm/vmx/vmx.c static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
vmx               882 arch/x86/kvm/vmx/vmx.c 	struct msr_autoload *m = &vmx->msr_autoload;
vmx               887 arch/x86/kvm/vmx/vmx.c 			add_atomic_switch_msr_special(vmx,
vmx               898 arch/x86/kvm/vmx/vmx.c 			add_atomic_switch_msr_special(vmx,
vmx               944 arch/x86/kvm/vmx/vmx.c static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
vmx               946 arch/x86/kvm/vmx/vmx.c 	u64 guest_efer = vmx->vcpu.arch.efer;
vmx               970 arch/x86/kvm/vmx/vmx.c 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
vmx               974 arch/x86/kvm/vmx/vmx.c 			add_atomic_switch_msr(vmx, MSR_EFER,
vmx               977 arch/x86/kvm/vmx/vmx.c 			clear_atomic_switch_msr(vmx, MSR_EFER);
vmx               980 arch/x86/kvm/vmx/vmx.c 		clear_atomic_switch_msr(vmx, MSR_EFER);
vmx               985 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs[efer_offset].data = guest_efer;
vmx               986 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
vmx              1049 arch/x86/kvm/vmx/vmx.c static void pt_guest_enter(struct vcpu_vmx *vmx)
vmx              1058 arch/x86/kvm/vmx/vmx.c 	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
vmx              1059 arch/x86/kvm/vmx/vmx.c 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
vmx              1061 arch/x86/kvm/vmx/vmx.c 		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
vmx              1062 arch/x86/kvm/vmx/vmx.c 		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
vmx              1066 arch/x86/kvm/vmx/vmx.c static void pt_guest_exit(struct vcpu_vmx *vmx)
vmx              1071 arch/x86/kvm/vmx/vmx.c 	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
vmx              1072 arch/x86/kvm/vmx/vmx.c 		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
vmx              1073 arch/x86/kvm/vmx/vmx.c 		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
vmx              1077 arch/x86/kvm/vmx/vmx.c 	wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
vmx              1109 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1118 arch/x86/kvm/vmx/vmx.c 	vmx->req_immediate_exit = false;
vmx              1125 arch/x86/kvm/vmx/vmx.c 	if (!vmx->guest_msrs_ready) {
vmx              1126 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs_ready = true;
vmx              1127 arch/x86/kvm/vmx/vmx.c 		for (i = 0; i < vmx->save_nmsrs; ++i)
vmx              1128 arch/x86/kvm/vmx/vmx.c 			kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx              1129 arch/x86/kvm/vmx/vmx.c 					   vmx->guest_msrs[i].data,
vmx              1130 arch/x86/kvm/vmx/vmx.c 					   vmx->guest_msrs[i].mask);
vmx              1133 arch/x86/kvm/vmx/vmx.c 	if (vmx->guest_state_loaded)
vmx              1136 arch/x86/kvm/vmx/vmx.c 	host_state = &vmx->loaded_vmcs->host_state;
vmx              1154 arch/x86/kvm/vmx/vmx.c 		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
vmx              1159 arch/x86/kvm/vmx/vmx.c 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
vmx              1162 arch/x86/kvm/vmx/vmx.c 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
vmx              1171 arch/x86/kvm/vmx/vmx.c 	vmx->guest_state_loaded = true;
vmx              1174 arch/x86/kvm/vmx/vmx.c static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
vmx              1178 arch/x86/kvm/vmx/vmx.c 	if (!vmx->guest_state_loaded)
vmx              1181 arch/x86/kvm/vmx/vmx.c 	host_state = &vmx->loaded_vmcs->host_state;
vmx              1183 arch/x86/kvm/vmx/vmx.c 	++vmx->vcpu.stat.host_state_reload;
vmx              1186 arch/x86/kvm/vmx/vmx.c 	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
vmx              1206 arch/x86/kvm/vmx/vmx.c 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
vmx              1209 arch/x86/kvm/vmx/vmx.c 	vmx->guest_state_loaded = false;
vmx              1210 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs_ready = false;
vmx              1214 arch/x86/kvm/vmx/vmx.c static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
vmx              1217 arch/x86/kvm/vmx/vmx.c 	if (vmx->guest_state_loaded)
vmx              1218 arch/x86/kvm/vmx/vmx.c 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
vmx              1220 arch/x86/kvm/vmx/vmx.c 	return vmx->msr_guest_kernel_gs_base;
vmx              1223 arch/x86/kvm/vmx/vmx.c static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
vmx              1226 arch/x86/kvm/vmx/vmx.c 	if (vmx->guest_state_loaded)
vmx              1229 arch/x86/kvm/vmx/vmx.c 	vmx->msr_guest_kernel_gs_base = data;
vmx              1292 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1293 arch/x86/kvm/vmx/vmx.c 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
vmx              1297 arch/x86/kvm/vmx/vmx.c 		loaded_vmcs_clear(vmx->loaded_vmcs);
vmx              1308 arch/x86/kvm/vmx/vmx.c 		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
vmx              1314 arch/x86/kvm/vmx/vmx.c 	if (prev != vmx->loaded_vmcs->vmcs) {
vmx              1315 arch/x86/kvm/vmx/vmx.c 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmx              1316 arch/x86/kvm/vmx/vmx.c 		vmcs_load(vmx->loaded_vmcs->vmcs);
vmx              1352 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->cpu = cpu;
vmx              1357 arch/x86/kvm/vmx/vmx.c 	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
vmx              1358 arch/x86/kvm/vmx/vmx.c 		decache_tsc_multiplier(vmx);
vmx              1367 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1373 arch/x86/kvm/vmx/vmx.c 	vmx->host_debugctlmsr = get_debugctlmsr();
vmx              1468 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1475 arch/x86/kvm/vmx/vmx.c 	if (data & vmx->pt_desc.ctl_bitmask)
vmx              1482 arch/x86/kvm/vmx/vmx.c 	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
vmx              1483 arch/x86/kvm/vmx/vmx.c 		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
vmx              1493 arch/x86/kvm/vmx/vmx.c 		!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1501 arch/x86/kvm/vmx/vmx.c 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
vmx              1502 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
vmx              1506 arch/x86/kvm/vmx/vmx.c 	value = intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1508 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
vmx              1512 arch/x86/kvm/vmx/vmx.c 	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
vmx              1513 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
vmx              1523 arch/x86/kvm/vmx/vmx.c 	if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
vmx              1526 arch/x86/kvm/vmx/vmx.c 	if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
vmx              1529 arch/x86/kvm/vmx/vmx.c 	if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
vmx              1532 arch/x86/kvm/vmx/vmx.c 	if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
vmx              1581 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1594 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active) {
vmx              1602 arch/x86/kvm/vmx/vmx.c 	WARN_ON_ONCE(vmx->emulation_required);
vmx              1606 arch/x86/kvm/vmx/vmx.c 			     vmx->vcpu.arch.event_exit_inst_len);
vmx              1629 arch/x86/kvm/vmx/vmx.c static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
vmx              1633 arch/x86/kvm/vmx/vmx.c 	tmp = vmx->guest_msrs[to];
vmx              1634 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs[to] = vmx->guest_msrs[from];
vmx              1635 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs[from] = tmp;
vmx              1643 arch/x86/kvm/vmx/vmx.c static void setup_msrs(struct vcpu_vmx *vmx)
vmx              1653 arch/x86/kvm/vmx/vmx.c 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
vmx              1654 arch/x86/kvm/vmx/vmx.c 		index = __find_msr_index(vmx, MSR_STAR);
vmx              1656 arch/x86/kvm/vmx/vmx.c 			move_msr_up(vmx, index, save_nmsrs++);
vmx              1657 arch/x86/kvm/vmx/vmx.c 		index = __find_msr_index(vmx, MSR_LSTAR);
vmx              1659 arch/x86/kvm/vmx/vmx.c 			move_msr_up(vmx, index, save_nmsrs++);
vmx              1660 arch/x86/kvm/vmx/vmx.c 		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
vmx              1662 arch/x86/kvm/vmx/vmx.c 			move_msr_up(vmx, index, save_nmsrs++);
vmx              1665 arch/x86/kvm/vmx/vmx.c 	index = __find_msr_index(vmx, MSR_EFER);
vmx              1666 arch/x86/kvm/vmx/vmx.c 	if (index >= 0 && update_transition_efer(vmx, index))
vmx              1667 arch/x86/kvm/vmx/vmx.c 		move_msr_up(vmx, index, save_nmsrs++);
vmx              1668 arch/x86/kvm/vmx/vmx.c 	index = __find_msr_index(vmx, MSR_TSC_AUX);
vmx              1669 arch/x86/kvm/vmx/vmx.c 	if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
vmx              1670 arch/x86/kvm/vmx/vmx.c 		move_msr_up(vmx, index, save_nmsrs++);
vmx              1672 arch/x86/kvm/vmx/vmx.c 	vmx->save_nmsrs = save_nmsrs;
vmx              1673 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs_ready = false;
vmx              1676 arch/x86/kvm/vmx/vmx.c 		vmx_update_msr_bitmap(&vmx->vcpu);
vmx              1752 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1765 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
vmx              1771 arch/x86/kvm/vmx/vmx.c 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
vmx              1774 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->msr_ia32_umwait_control;
vmx              1801 arch/x86/kvm/vmx/vmx.c 		    !(vmx->msr_ia32_feature_control &
vmx              1807 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->msr_ia32_feature_control;
vmx              1812 arch/x86/kvm/vmx/vmx.c 		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
vmx              1825 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->pt_desc.guest.ctl;
vmx              1830 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->pt_desc.guest.status;
vmx              1834 arch/x86/kvm/vmx/vmx.c 			!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1837 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->pt_desc.guest.cr3_match;
vmx              1841 arch/x86/kvm/vmx/vmx.c 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1843 arch/x86/kvm/vmx/vmx.c 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1846 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->pt_desc.guest.output_base;
vmx              1850 arch/x86/kvm/vmx/vmx.c 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1852 arch/x86/kvm/vmx/vmx.c 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1855 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vmx->pt_desc.guest.output_mask;
vmx              1860 arch/x86/kvm/vmx/vmx.c 			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              1864 arch/x86/kvm/vmx/vmx.c 			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
vmx              1866 arch/x86/kvm/vmx/vmx.c 			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
vmx              1874 arch/x86/kvm/vmx/vmx.c 		msr = find_msr_entry(vmx, msr_info->index);
vmx              1892 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              1905 arch/x86/kvm/vmx/vmx.c 		vmx_segment_cache_clear(vmx);
vmx              1909 arch/x86/kvm/vmx/vmx.c 		vmx_segment_cache_clear(vmx);
vmx              1913 arch/x86/kvm/vmx/vmx.c 		vmx_write_guest_kernel_gs_base(vmx, data);
vmx              1950 arch/x86/kvm/vmx/vmx.c 		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
vmx              1957 arch/x86/kvm/vmx/vmx.c 		vmx->msr_ia32_umwait_control = data;
vmx              1968 arch/x86/kvm/vmx/vmx.c 		vmx->spec_ctrl = data;
vmx              1985 arch/x86/kvm/vmx/vmx.c 		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
vmx              2013 arch/x86/kvm/vmx/vmx.c 		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
vmx              2047 arch/x86/kvm/vmx/vmx.c 		vmx->msr_ia32_feature_control = data;
vmx              2071 arch/x86/kvm/vmx/vmx.c 			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
vmx              2074 arch/x86/kvm/vmx/vmx.c 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
vmx              2079 arch/x86/kvm/vmx/vmx.c 			vmx->nested.vmxon)
vmx              2082 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.ctl = data;
vmx              2083 arch/x86/kvm/vmx/vmx.c 		pt_update_intercept_for_msr(vmx);
vmx              2087 arch/x86/kvm/vmx/vmx.c 			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
vmx              2090 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.status = data;
vmx              2094 arch/x86/kvm/vmx/vmx.c 			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
vmx              2095 arch/x86/kvm/vmx/vmx.c 			!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2098 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.cr3_match = data;
vmx              2102 arch/x86/kvm/vmx/vmx.c 			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
vmx              2103 arch/x86/kvm/vmx/vmx.c 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2105 arch/x86/kvm/vmx/vmx.c 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2109 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.output_base = data;
vmx              2113 arch/x86/kvm/vmx/vmx.c 			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
vmx              2114 arch/x86/kvm/vmx/vmx.c 			(!intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2116 arch/x86/kvm/vmx/vmx.c 			 !intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2119 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.output_mask = data;
vmx              2124 arch/x86/kvm/vmx/vmx.c 			(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
vmx              2125 arch/x86/kvm/vmx/vmx.c 			(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              2131 arch/x86/kvm/vmx/vmx.c 			vmx->pt_desc.guest.addr_b[index / 2] = data;
vmx              2133 arch/x86/kvm/vmx/vmx.c 			vmx->pt_desc.guest.addr_a[index / 2] = data;
vmx              2144 arch/x86/kvm/vmx/vmx.c 		msr = find_msr_entry(vmx, msr_index);
vmx              2148 arch/x86/kvm/vmx/vmx.c 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
vmx              2646 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2652 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx              2653 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vmx              2654 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vmx              2655 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vmx              2656 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vmx              2657 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vmx              2659 arch/x86/kvm/vmx/vmx.c 	vmx->rmode.vm86_active = 0;
vmx              2661 arch/x86/kvm/vmx/vmx.c 	vmx_segment_cache_clear(vmx);
vmx              2663 arch/x86/kvm/vmx/vmx.c 	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx              2667 arch/x86/kvm/vmx/vmx.c 	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmx              2675 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
vmx              2676 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
vmx              2677 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
vmx              2678 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
vmx              2679 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
vmx              2680 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
vmx              2719 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2722 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx              2723 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx              2724 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vmx              2725 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vmx              2726 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vmx              2727 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vmx              2728 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vmx              2730 arch/x86/kvm/vmx/vmx.c 	vmx->rmode.vm86_active = 1;
vmx              2740 arch/x86/kvm/vmx/vmx.c 	vmx_segment_cache_clear(vmx);
vmx              2747 arch/x86/kvm/vmx/vmx.c 	vmx->rmode.save_rflags = flags;
vmx              2755 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
vmx              2756 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
vmx              2757 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
vmx              2758 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
vmx              2759 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
vmx              2760 arch/x86/kvm/vmx/vmx.c 	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
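
enter_rmode() above snapshots every protected-mode segment into vmx->rmode.segs[] and then forces the vm86-compatible shape through fix_rmode_seg(). A minimal sketch of that shape, assuming the usual real-mode segmentation rules (base = selector << 4, 64 KiB limit; the access-rights byte is our recollection of the convention, not quoted from the source):

	#include <stdint.h>
	#include <stdio.h>

	struct rm_seg { uint32_t base, limit; uint16_t selector; uint8_t ar; };

	static void make_vm86_seg(struct rm_seg *s, uint16_t selector)
	{
		s->selector = selector;
		s->base = (uint32_t)selector << 4; /* real-mode address math */
		s->limit = 0xffff;                 /* 64 KiB segment */
		s->ar = 0xf3;                      /* present, DPL 3, RW data */
	}

	int main(void)
	{
		struct rm_seg cs;

		make_vm86_seg(&cs, 0xf000);
		printf("sel=%#x base=%#x limit=%#x ar=%#x\n",
		       cs.selector, cs.base, cs.limit, cs.ar);
		return 0;
	}
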
vmx              2767 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2768 arch/x86/kvm/vmx/vmx.c 	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vmx              2782 arch/x86/kvm/vmx/vmx.c 	setup_msrs(vmx);
vmx              2886 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2892 arch/x86/kvm/vmx/vmx.c 		exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
vmx              2898 arch/x86/kvm/vmx/vmx.c 		exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING |
vmx              2910 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              2919 arch/x86/kvm/vmx/vmx.c 		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
vmx              2922 arch/x86/kvm/vmx/vmx.c 		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
vmx              2943 arch/x86/kvm/vmx/vmx.c 	vmx->emulation_required = emulation_required(vcpu);
vmx              3006 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3017 arch/x86/kvm/vmx/vmx.c 	else if (vmx->rmode.vm86_active)
vmx              3024 arch/x86/kvm/vmx/vmx.c 			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
vmx              3028 arch/x86/kvm/vmx/vmx.c 			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
vmx              3044 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
vmx              3081 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3084 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
vmx              3085 arch/x86/kvm/vmx/vmx.c 		*var = vmx->rmode.segs[seg];
vmx              3087 arch/x86/kvm/vmx/vmx.c 		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
vmx              3089 arch/x86/kvm/vmx/vmx.c 		var->base = vmx_read_guest_seg_base(vmx, seg);
vmx              3090 arch/x86/kvm/vmx/vmx.c 		var->selector = vmx_read_guest_seg_selector(vmx, seg);
vmx              3093 arch/x86/kvm/vmx/vmx.c 	var->base = vmx_read_guest_seg_base(vmx, seg);
vmx              3094 arch/x86/kvm/vmx/vmx.c 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
vmx              3095 arch/x86/kvm/vmx/vmx.c 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
vmx              3096 arch/x86/kvm/vmx/vmx.c 	ar = vmx_read_guest_seg_ar(vmx, seg);
vmx              3128 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3130 arch/x86/kvm/vmx/vmx.c 	if (unlikely(vmx->rmode.vm86_active))
vmx              3133 arch/x86/kvm/vmx/vmx.c 		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
vmx              3160 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3163 arch/x86/kvm/vmx/vmx.c 	vmx_segment_cache_clear(vmx);
vmx              3165 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
vmx              3166 arch/x86/kvm/vmx/vmx.c 		vmx->rmode.segs[seg] = *var;
vmx              3170 arch/x86/kvm/vmx/vmx.c 			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
vmx              3195 arch/x86/kvm/vmx/vmx.c 	vmx->emulation_required = emulation_required(vcpu);
vmx              3699 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3700 arch/x86/kvm/vmx/vmx.c 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
vmx              3702 arch/x86/kvm/vmx/vmx.c 	u8 changed = mode ^ vmx->msr_bitmap_mode;
vmx              3710 arch/x86/kvm/vmx/vmx.c 	vmx->msr_bitmap_mode = mode;
vmx              3713 arch/x86/kvm/vmx/vmx.c void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
vmx              3715 arch/x86/kvm/vmx/vmx.c 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
vmx              3716 arch/x86/kvm/vmx/vmx.c 	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
vmx              3727 arch/x86/kvm/vmx/vmx.c 	for (i = 0; i < vmx->pt_desc.addr_range; i++) {
vmx              3742 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3749 arch/x86/kvm/vmx/vmx.c 		WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
vmx              3754 arch/x86/kvm/vmx/vmx.c 	vapic_page = vmx->nested.virtual_apic_map.hva;
vmx              3802 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3805 arch/x86/kvm/vmx/vmx.c 	    vector == vmx->nested.posted_intr_nv) {
vmx              3810 arch/x86/kvm/vmx/vmx.c 		vmx->nested.pi_pending = true;
vmx              3828 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3838 arch/x86/kvm/vmx/vmx.c 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
vmx              3842 arch/x86/kvm/vmx/vmx.c 	if (pi_test_and_set_on(&vmx->pi_desc))
vmx              3857 arch/x86/kvm/vmx/vmx.c void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmx              3873 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs->host_state.cr3 = cr3;
vmx              3878 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs->host_state.cr4 = cr4;
vmx              3914 arch/x86/kvm/vmx/vmx.c void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
vmx              3916 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
vmx              3918 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmx              3919 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(&vmx->vcpu))
vmx              3920 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.cr4_guest_owned_bits &=
vmx              3921 arch/x86/kvm/vmx/vmx.c 			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
vmx              3922 arch/x86/kvm/vmx/vmx.c 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
vmx              3925 arch/x86/kvm/vmx/vmx.c u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
vmx              3929 arch/x86/kvm/vmx/vmx.c 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
vmx              3943 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              3945 arch/x86/kvm/vmx/vmx.c 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
vmx              3948 arch/x86/kvm/vmx/vmx.c 			secondary_exec_controls_setbit(vmx,
vmx              3952 arch/x86/kvm/vmx/vmx.c 			secondary_exec_controls_clearbit(vmx,
vmx              3961 arch/x86/kvm/vmx/vmx.c u32 vmx_exec_control(struct vcpu_vmx *vmx)
vmx              3965 arch/x86/kvm/vmx/vmx.c 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
vmx              3968 arch/x86/kvm/vmx/vmx.c 	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
vmx              3979 arch/x86/kvm/vmx/vmx.c 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
vmx              3982 arch/x86/kvm/vmx/vmx.c 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
vmx              3988 arch/x86/kvm/vmx/vmx.c static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
vmx              3990 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu = &vmx->vcpu;
vmx              3998 arch/x86/kvm/vmx/vmx.c 	if (vmx->vpid == 0)
vmx              4006 arch/x86/kvm/vmx/vmx.c 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
vmx              4038 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4041 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4053 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4056 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4074 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4077 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4089 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4092 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4104 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4107 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4121 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high |=
vmx              4124 arch/x86/kvm/vmx/vmx.c 				vmx->nested.msrs.secondary_ctls_high &=
vmx              4129 arch/x86/kvm/vmx/vmx.c 	vmx->secondary_exec_control = exec_control;
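
vmx_compute_secondary_exec_control() closes by caching the computed word (line 4129); the long run of |= / &= ~ pairs above it (lines 4038-4124) is one pattern repeated per feature: mirror a guest CPUID bit into the nested IA32_VMX_PROCBASED_CTLS2 shadow so L1 only sees controls its CPUID justifies. A reduced sketch of that pattern; the bit value is the architectural XSAVES control, the helper name is invented:

	#include <stdint.h>
	#include <stdio.h>

	#define SECONDARY_EXEC_XSAVES 0x00100000 /* architectural bit 20 */

	static void adjust_nested_cap(uint32_t *ctls_high, int guest_has_feature)
	{
		if (guest_has_feature)
			*ctls_high |= SECONDARY_EXEC_XSAVES;
		else
			*ctls_high &= ~SECONDARY_EXEC_XSAVES;
	}

	int main(void)
	{
		uint32_t secondary_ctls_high = 0;

		adjust_nested_cap(&secondary_ctls_high, 1);
		printf("ctls_high=%#x\n", secondary_ctls_high);
		return 0;
	}
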
vmx              4147 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmx              4155 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
vmx              4160 arch/x86/kvm/vmx/vmx.c 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
vmx              4161 arch/x86/kvm/vmx/vmx.c 	vmx->hv_deadline_tsc = -1;
vmx              4163 arch/x86/kvm/vmx/vmx.c 	exec_controls_set(vmx, vmx_exec_control(vmx));
vmx              4166 arch/x86/kvm/vmx/vmx.c 		vmx_compute_secondary_exec_control(vmx);
vmx              4167 arch/x86/kvm/vmx/vmx.c 		secondary_exec_controls_set(vmx, vmx->secondary_exec_control);
vmx              4170 arch/x86/kvm/vmx/vmx.c 	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
vmx              4179 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
vmx              4182 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
vmx              4184 arch/x86/kvm/vmx/vmx.c 		vmx->ple_window = ple_window;
vmx              4185 arch/x86/kvm/vmx/vmx.c 		vmx->ple_window_dirty = true;
vmx              4194 arch/x86/kvm/vmx/vmx.c 	vmx_set_constant_host_state(vmx);
vmx              4203 arch/x86/kvm/vmx/vmx.c 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmx              4205 arch/x86/kvm/vmx/vmx.c 	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
vmx              4208 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
vmx              4213 arch/x86/kvm/vmx/vmx.c 		int j = vmx->nmsrs;
vmx              4219 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs[j].index = i;
vmx              4220 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs[j].data = 0;
vmx              4221 arch/x86/kvm/vmx/vmx.c 		vmx->guest_msrs[j].mask = -1ull;
vmx              4222 arch/x86/kvm/vmx/vmx.c 		++vmx->nmsrs;
vmx              4225 arch/x86/kvm/vmx/vmx.c 	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
vmx              4228 arch/x86/kvm/vmx/vmx.c 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
vmx              4230 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
vmx              4233 arch/x86/kvm/vmx/vmx.c 	set_cr4_guest_host_mask(vmx);
vmx              4239 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmx              4247 arch/x86/kvm/vmx/vmx.c 		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
vmx              4249 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.guest.output_mask = 0x7F;
vmx              4256 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4260 arch/x86/kvm/vmx/vmx.c 	vmx->rmode.vm86_active = 0;
vmx              4261 arch/x86/kvm/vmx/vmx.c 	vmx->spec_ctrl = 0;
vmx              4263 arch/x86/kvm/vmx/vmx.c 	vmx->msr_ia32_umwait_control = 0;
vmx              4266 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
vmx              4267 arch/x86/kvm/vmx/vmx.c 	vmx->hv_deadline_tsc = -1;
vmx              4279 arch/x86/kvm/vmx/vmx.c 	vmx_segment_cache_clear(vmx);
vmx              4323 arch/x86/kvm/vmx/vmx.c 	setup_msrs(vmx);
vmx              4337 arch/x86/kvm/vmx/vmx.c 	if (vmx->vpid != 0)
vmx              4338 arch/x86/kvm/vmx/vmx.c 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx              4341 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr0 = cr0;
vmx              4348 arch/x86/kvm/vmx/vmx.c 	vpid_sync_context(vmx->vpid);
vmx              4371 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4378 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active) {
vmx              4389 arch/x86/kvm/vmx/vmx.c 			     vmx->vcpu.arch.event_exit_inst_len);
vmx              4399 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4410 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
vmx              4411 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->vnmi_blocked_time = 0;
vmx              4415 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs->nmi_known_unmasked = false;
vmx              4417 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active) {
vmx              4430 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4434 arch/x86/kvm/vmx/vmx.c 		return vmx->loaded_vmcs->soft_vnmi_blocked;
vmx              4435 arch/x86/kvm/vmx/vmx.c 	if (vmx->loaded_vmcs->nmi_known_unmasked)
vmx              4438 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
vmx              4444 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4447 arch/x86/kvm/vmx/vmx.c 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
vmx              4448 arch/x86/kvm/vmx/vmx.c 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
vmx              4449 arch/x86/kvm/vmx/vmx.c 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
vmx              4452 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
vmx              4596 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              4602 arch/x86/kvm/vmx/vmx.c 	vect_info = vmx->idt_vectoring_info;
vmx              4603 arch/x86/kvm/vmx/vmx.c 	intr_info = vmx->exit_intr_info;
vmx              4615 arch/x86/kvm/vmx/vmx.c 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
vmx              4655 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
vmx              4683 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.event_exit_inst_len =
vmx              5092 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5099 arch/x86/kvm/vmx/vmx.c 	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
vmx              5100 arch/x86/kvm/vmx/vmx.c 	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
vmx              5101 arch/x86/kvm/vmx/vmx.c 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
vmx              5117 arch/x86/kvm/vmx/vmx.c 			if (vmx->idt_vectoring_info &
vmx              5221 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5230 arch/x86/kvm/vmx/vmx.c 	WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
vmx              5232 arch/x86/kvm/vmx/vmx.c 	intr_window_requested = exec_controls_get(vmx) &
vmx              5235 arch/x86/kvm/vmx/vmx.c 	while (vmx->emulation_required && count-- != 0) {
vmx              5237 arch/x86/kvm/vmx/vmx.c 			return handle_interrupt_window(&vmx->vcpu);
vmx              5245 arch/x86/kvm/vmx/vmx.c 		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
vmx              5275 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5276 arch/x86/kvm/vmx/vmx.c 	unsigned int old = vmx->ple_window;
vmx              5278 arch/x86/kvm/vmx/vmx.c 	vmx->ple_window = __grow_ple_window(old, ple_window,
vmx              5282 arch/x86/kvm/vmx/vmx.c 	if (vmx->ple_window != old) {
vmx              5283 arch/x86/kvm/vmx/vmx.c 		vmx->ple_window_dirty = true;
vmx              5285 arch/x86/kvm/vmx/vmx.c 					    vmx->ple_window, old);
vmx              5291 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5292 arch/x86/kvm/vmx/vmx.c 	unsigned int old = vmx->ple_window;
vmx              5294 arch/x86/kvm/vmx/vmx.c 	vmx->ple_window = __shrink_ple_window(old, ple_window,
vmx              5298 arch/x86/kvm/vmx/vmx.c 	if (vmx->ple_window != old) {
vmx              5299 arch/x86/kvm/vmx/vmx.c 		vmx->ple_window_dirty = true;
vmx              5301 arch/x86/kvm/vmx/vmx.c 					    vmx->ple_window, old);
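
grow_ple_window()/shrink_ple_window() above only record the new value and set ple_window_dirty; the actual VMWRITE happens later in vmx_vcpu_run() (lines 6498-6500 below). The shared helper they call lives in arch/x86/kvm/x86.h; the sketch below approximates its clamp-and-scale behaviour and should be read as an assumption, not a quotation:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int grow_ple_window(unsigned int val, unsigned int base,
					    unsigned int modifier, unsigned int max)
	{
		uint64_t ret = val;

		if (modifier < 1)
			return base;
		if (modifier < base)
			ret *= modifier;      /* small modifier: scale */
		else
			ret += modifier;      /* large modifier: step */
		return ret > max ? max : (unsigned int)ret;
	}

	int main(void)
	{
		printf("%u\n", grow_ple_window(4096, 4096, 2, 1u << 30));
		return 0;
	}
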
vmx              5511 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5513 arch/x86/kvm/vmx/vmx.c 	if (!vmx->req_immediate_exit &&
vmx              5514 arch/x86/kvm/vmx/vmx.c 	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
vmx              5608 arch/x86/kvm/vmx/vmx.c static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
vmx              5610 arch/x86/kvm/vmx/vmx.c 	if (vmx->pml_pg) {
vmx              5611 arch/x86/kvm/vmx/vmx.c 		__free_page(vmx->pml_pg);
vmx              5612 arch/x86/kvm/vmx/vmx.c 		vmx->pml_pg = NULL;
vmx              5618 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5634 arch/x86/kvm/vmx/vmx.c 	pml_buf = page_address(vmx->pml_pg);
vmx              5846 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              5847 arch/x86/kvm/vmx/vmx.c 	u32 exit_reason = vmx->exit_reason;
vmx              5848 arch/x86/kvm/vmx/vmx.c 	u32 vectoring_info = vmx->idt_vectoring_info;
vmx              5863 arch/x86/kvm/vmx/vmx.c 	if (vmx->emulation_required)
vmx              5877 arch/x86/kvm/vmx/vmx.c 	if (unlikely(vmx->fail)) {
vmx              5912 arch/x86/kvm/vmx/vmx.c 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
vmx              5914 arch/x86/kvm/vmx/vmx.c 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
vmx              5915 arch/x86/kvm/vmx/vmx.c 		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
vmx              5926 arch/x86/kvm/vmx/vmx.c 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
vmx              6034 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6046 arch/x86/kvm/vmx/vmx.c 		vmx->nested.change_vmcs01_virtual_apic_mode = true;
vmx              6050 arch/x86/kvm/vmx/vmx.c 	sec_exec_control = secondary_exec_controls_get(vmx);
vmx              6072 arch/x86/kvm/vmx/vmx.c 	secondary_exec_controls_set(vmx, sec_exec_control);
vmx              6135 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6140 arch/x86/kvm/vmx/vmx.c 	if (pi_test_on(&vmx->pi_desc)) {
vmx              6141 arch/x86/kvm/vmx/vmx.c 		pi_clear_on(&vmx->pi_desc);
vmx              6148 arch/x86/kvm/vmx/vmx.c 			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
vmx              6192 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6194 arch/x86/kvm/vmx/vmx.c 	pi_clear_on(&vmx->pi_desc);
vmx              6195 arch/x86/kvm/vmx/vmx.c 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
vmx              6198 arch/x86/kvm/vmx/vmx.c static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
vmx              6200 arch/x86/kvm/vmx/vmx.c 	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
vmx              6203 arch/x86/kvm/vmx/vmx.c 	if (is_page_fault(vmx->exit_intr_info))
vmx              6204 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
vmx              6207 arch/x86/kvm/vmx/vmx.c 	if (is_machine_check(vmx->exit_intr_info))
vmx              6211 arch/x86/kvm/vmx/vmx.c 	if (is_nmi(vmx->exit_intr_info)) {
vmx              6212 arch/x86/kvm/vmx/vmx.c 		kvm_before_interrupt(&vmx->vcpu);
vmx              6214 arch/x86/kvm/vmx/vmx.c 		kvm_after_interrupt(&vmx->vcpu);
vmx              6266 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6268 arch/x86/kvm/vmx/vmx.c 	if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
vmx              6270 arch/x86/kvm/vmx/vmx.c 	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
vmx              6271 arch/x86/kvm/vmx/vmx.c 		handle_exception_nmi_irqoff(vmx);
vmx              6298 arch/x86/kvm/vmx/vmx.c static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
vmx              6305 arch/x86/kvm/vmx/vmx.c 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
vmx              6308 arch/x86/kvm/vmx/vmx.c 		if (vmx->loaded_vmcs->nmi_known_unmasked)
vmx              6332 arch/x86/kvm/vmx/vmx.c 			vmx->loaded_vmcs->nmi_known_unmasked =
vmx              6335 arch/x86/kvm/vmx/vmx.c 	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
vmx              6336 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->vnmi_blocked_time +=
vmx              6338 arch/x86/kvm/vmx/vmx.c 					      vmx->loaded_vmcs->entry_time));
vmx              6395 arch/x86/kvm/vmx/vmx.c static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
vmx              6397 arch/x86/kvm/vmx/vmx.c 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
vmx              6412 arch/x86/kvm/vmx/vmx.c static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
vmx              6424 arch/x86/kvm/vmx/vmx.c 			clear_atomic_switch_msr(vmx, msrs[i].msr);
vmx              6426 arch/x86/kvm/vmx/vmx.c 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
vmx              6430 arch/x86/kvm/vmx/vmx.c static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
vmx              6434 arch/x86/kvm/vmx/vmx.c 	if (!vmx_has_waitpkg(vmx))
vmx              6439 arch/x86/kvm/vmx/vmx.c 	if (vmx->msr_ia32_umwait_control != host_umwait_control)
vmx              6440 arch/x86/kvm/vmx/vmx.c 		add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
vmx              6441 arch/x86/kvm/vmx/vmx.c 			vmx->msr_ia32_umwait_control,
vmx              6444 arch/x86/kvm/vmx/vmx.c 		clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
vmx              6449 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6453 arch/x86/kvm/vmx/vmx.c 	if (vmx->req_immediate_exit) {
vmx              6455 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
vmx              6456 arch/x86/kvm/vmx/vmx.c 	} else if (vmx->hv_deadline_tsc != -1) {
vmx              6458 arch/x86/kvm/vmx/vmx.c 		if (vmx->hv_deadline_tsc > tscl)
vmx              6460 arch/x86/kvm/vmx/vmx.c 			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
vmx              6466 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
vmx              6467 arch/x86/kvm/vmx/vmx.c 	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
vmx              6469 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
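
The hunk at lines 6449-6469 converts a TSC deadline into VMX-preemption-timer units; the shift truncated at line 6460 is presumably the architectural divider, IA32_VMX_MISC[4:0], by which the timer counts slower than the TSC. A standalone sketch of that conversion under that assumption:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tsc_delta_to_timer(uint64_t deadline, uint64_t now,
					   unsigned int rate)
	{
		if (deadline <= now)
			return 0;   /* deadline already passed: exit at once */
		/* timer ticks once per 2^rate TSC cycles */
		return (uint32_t)((deadline - now) >> rate);
	}

	int main(void)
	{
		printf("%u\n", tsc_delta_to_timer(1000000, 0, 5));
		return 0;
	}
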
vmx              6473 arch/x86/kvm/vmx/vmx.c void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
vmx              6475 arch/x86/kvm/vmx/vmx.c 	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
vmx              6476 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->host_state.rsp = host_rsp;
vmx              6481 arch/x86/kvm/vmx/vmx.c bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
vmx              6485 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6490 arch/x86/kvm/vmx/vmx.c 		     vmx->loaded_vmcs->soft_vnmi_blocked))
vmx              6491 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->entry_time = ktime_get();
vmx              6495 arch/x86/kvm/vmx/vmx.c 	if (vmx->emulation_required)
vmx              6498 arch/x86/kvm/vmx/vmx.c 	if (vmx->ple_window_dirty) {
vmx              6499 arch/x86/kvm/vmx/vmx.c 		vmx->ple_window_dirty = false;
vmx              6500 arch/x86/kvm/vmx/vmx.c 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
vmx              6503 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.need_vmcs12_to_shadow_sync)
vmx              6512 arch/x86/kvm/vmx/vmx.c 	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
vmx              6514 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->host_state.cr3 = cr3;
vmx              6518 arch/x86/kvm/vmx/vmx.c 	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmx              6520 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->host_state.cr4 = cr4;
vmx              6533 arch/x86/kvm/vmx/vmx.c 	pt_guest_enter(vmx);
vmx              6535 arch/x86/kvm/vmx/vmx.c 	atomic_switch_perf_msrs(vmx);
vmx              6536 arch/x86/kvm/vmx/vmx.c 	atomic_switch_umwait_control_msr(vmx);
vmx              6551 arch/x86/kvm/vmx/vmx.c 	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
vmx              6562 arch/x86/kvm/vmx/vmx.c 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
vmx              6563 arch/x86/kvm/vmx/vmx.c 				   vmx->loaded_vmcs->launched);
vmx              6583 arch/x86/kvm/vmx/vmx.c 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
vmx              6585 arch/x86/kvm/vmx/vmx.c 	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
vmx              6596 arch/x86/kvm/vmx/vmx.c 	if (vmx->host_debugctlmsr)
vmx              6597 arch/x86/kvm/vmx/vmx.c 		update_debugctlmsr(vmx->host_debugctlmsr);
vmx              6619 arch/x86/kvm/vmx/vmx.c 	pt_guest_exit(vmx);
vmx              6623 arch/x86/kvm/vmx/vmx.c 	vmx->nested.nested_run_pending = 0;
vmx              6624 arch/x86/kvm/vmx/vmx.c 	vmx->idt_vectoring_info = 0;
vmx              6626 arch/x86/kvm/vmx/vmx.c 	vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
vmx              6627 arch/x86/kvm/vmx/vmx.c 	if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
vmx              6630 arch/x86/kvm/vmx/vmx.c 	if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
vmx              6633 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs->launched = 1;
vmx              6634 arch/x86/kvm/vmx/vmx.c 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
vmx              6636 arch/x86/kvm/vmx/vmx.c 	vmx_recover_nmi_blocking(vmx);
vmx              6637 arch/x86/kvm/vmx/vmx.c 	vmx_complete_interrupts(vmx);
vmx              6660 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6663 arch/x86/kvm/vmx/vmx.c 		vmx_destroy_pml_buffer(vmx);
vmx              6664 arch/x86/kvm/vmx/vmx.c 	free_vpid(vmx->vpid);
vmx              6666 arch/x86/kvm/vmx/vmx.c 	free_loaded_vmcs(vmx->loaded_vmcs);
vmx              6667 arch/x86/kvm/vmx/vmx.c 	kfree(vmx->guest_msrs);
vmx              6669 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
vmx              6670 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
vmx              6671 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(kvm_vcpu_cache, vmx);
vmx              6677 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx;
vmx              6684 arch/x86/kvm/vmx/vmx.c 	vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
vmx              6685 arch/x86/kvm/vmx/vmx.c 	if (!vmx)
vmx              6688 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
vmx              6690 arch/x86/kvm/vmx/vmx.c 	if (!vmx->vcpu.arch.user_fpu) {
vmx              6696 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
vmx              6698 arch/x86/kvm/vmx/vmx.c 	if (!vmx->vcpu.arch.guest_fpu) {
vmx              6704 arch/x86/kvm/vmx/vmx.c 	vmx->vpid = allocate_vpid();
vmx              6706 arch/x86/kvm/vmx/vmx.c 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
vmx              6719 arch/x86/kvm/vmx/vmx.c 		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
vmx              6720 arch/x86/kvm/vmx/vmx.c 		if (!vmx->pml_pg)
vmx              6724 arch/x86/kvm/vmx/vmx.c 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
vmx              6725 arch/x86/kvm/vmx/vmx.c 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
vmx              6728 arch/x86/kvm/vmx/vmx.c 	if (!vmx->guest_msrs)
vmx              6731 arch/x86/kvm/vmx/vmx.c 	err = alloc_loaded_vmcs(&vmx->vmcs01);
vmx              6735 arch/x86/kvm/vmx/vmx.c 	msr_bitmap = vmx->vmcs01.msr_bitmap;
vmx              6749 arch/x86/kvm/vmx/vmx.c 	vmx->msr_bitmap_mode = 0;
vmx              6751 arch/x86/kvm/vmx/vmx.c 	vmx->loaded_vmcs = &vmx->vmcs01;
vmx              6753 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vmx              6754 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.cpu = cpu;
vmx              6755 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_setup(vmx);
vmx              6756 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_put(&vmx->vcpu);
vmx              6758 arch/x86/kvm/vmx/vmx.c 	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
vmx              6771 arch/x86/kvm/vmx/vmx.c 		nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
vmx              6774 arch/x86/kvm/vmx/vmx.c 		memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
vmx              6776 arch/x86/kvm/vmx/vmx.c 	vmx->nested.posted_intr_nv = -1;
vmx              6777 arch/x86/kvm/vmx/vmx.c 	vmx->nested.current_vmptr = -1ull;
vmx              6779 arch/x86/kvm/vmx/vmx.c 	vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
vmx              6785 arch/x86/kvm/vmx/vmx.c 	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx              6786 arch/x86/kvm/vmx/vmx.c 	vmx->pi_desc.sn = 1;
vmx              6788 arch/x86/kvm/vmx/vmx.c 	vmx->ept_pointer = INVALID_PAGE;
vmx              6790 arch/x86/kvm/vmx/vmx.c 	return &vmx->vcpu;
vmx              6793 arch/x86/kvm/vmx/vmx.c 	free_loaded_vmcs(vmx->loaded_vmcs);
vmx              6795 arch/x86/kvm/vmx/vmx.c 	kfree(vmx->guest_msrs);
vmx              6797 arch/x86/kvm/vmx/vmx.c 	vmx_destroy_pml_buffer(vmx);
vmx              6799 arch/x86/kvm/vmx/vmx.c 	kvm_vcpu_uninit(&vmx->vcpu);
vmx              6801 arch/x86/kvm/vmx/vmx.c 	free_vpid(vmx->vpid);
vmx              6802 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
vmx              6804 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
vmx              6806 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(kvm_vcpu_cache, vmx);
vmx              6914 arch/x86/kvm/vmx/vmx.c static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
vmx              6928 arch/x86/kvm/vmx/vmx.c 	u32 new_ctl = vmx->secondary_exec_control;
vmx              6929 arch/x86/kvm/vmx/vmx.c 	u32 cur_ctl = secondary_exec_controls_get(vmx);
vmx              6931 arch/x86/kvm/vmx/vmx.c 	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
vmx              6940 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6943 arch/x86/kvm/vmx/vmx.c 	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
vmx              6944 arch/x86/kvm/vmx/vmx.c 	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
vmx              6948 arch/x86/kvm/vmx/vmx.c 		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
vmx              6979 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              6985 arch/x86/kvm/vmx/vmx.c 			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
vmx              6986 arch/x86/kvm/vmx/vmx.c 			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
vmx              6988 arch/x86/kvm/vmx/vmx.c 			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
vmx              6989 arch/x86/kvm/vmx/vmx.c 			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
vmx              6996 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              7004 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
vmx              7005 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
vmx              7006 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
vmx              7007 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
vmx              7011 arch/x86/kvm/vmx/vmx.c 	vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps,
vmx              7015 arch/x86/kvm/vmx/vmx.c 	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
vmx              7022 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
vmx              7023 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
vmx              7029 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
vmx              7030 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
vmx              7037 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
vmx              7038 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
vmx              7042 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
vmx              7043 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
vmx              7047 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
vmx              7048 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
vmx              7051 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
vmx              7052 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
vmx              7055 arch/x86/kvm/vmx/vmx.c 	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
vmx              7056 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
vmx              7059 arch/x86/kvm/vmx/vmx.c 	for (i = 0; i < vmx->pt_desc.addr_range; i++)
vmx              7060 arch/x86/kvm/vmx/vmx.c 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
vmx              7065 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              7068 arch/x86/kvm/vmx/vmx.c 		vmx_compute_secondary_exec_control(vmx);
vmx              7069 arch/x86/kvm/vmx/vmx.c 		vmcs_set_secondary_exec_control(vmx);
vmx              7204 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx;
vmx              7212 arch/x86/kvm/vmx/vmx.c 	vmx = to_vmx(vcpu);
vmx              7240 arch/x86/kvm/vmx/vmx.c 	vmx->hv_deadline_tsc = tscl + delta_tsc;
vmx              7278 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              7282 arch/x86/kvm/vmx/vmx.c 		WARN_ON_ONCE(vmx->nested.pml_full);
vmx              7294 arch/x86/kvm/vmx/vmx.c 			vmx->nested.pml_full = true;
vmx              7563 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              7565 arch/x86/kvm/vmx/vmx.c 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
vmx              7566 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.smm.guest_mode)
vmx              7569 arch/x86/kvm/vmx/vmx.c 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
vmx              7570 arch/x86/kvm/vmx/vmx.c 	vmx->nested.vmxon = false;
vmx              7577 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx              7580 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.smm.vmxon) {
vmx              7581 arch/x86/kvm/vmx/vmx.c 		vmx->nested.vmxon = true;
vmx              7582 arch/x86/kvm/vmx/vmx.c 		vmx->nested.smm.vmxon = false;
vmx              7585 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.smm.guest_mode) {
vmx              7590 arch/x86/kvm/vmx/vmx.c 		vmx->nested.smm.guest_mode = false;
vmx               312 arch/x86/kvm/vmx/vmx.h void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
vmx               325 arch/x86/kvm/vmx/vmx.h void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
vmx               335 arch/x86/kvm/vmx/vmx.h struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
vmx               336 arch/x86/kvm/vmx/vmx.h void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
vmx               337 arch/x86/kvm/vmx/vmx.h void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
vmx               406 arch/x86/kvm/vmx/vmx.h static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
vmx               408 arch/x86/kvm/vmx/vmx.h 	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
vmx               410 arch/x86/kvm/vmx/vmx.h 		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
vmx               413 arch/x86/kvm/vmx/vmx.h static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
vmx               415 arch/x86/kvm/vmx/vmx.h 	return vmx->loaded_vmcs->controls_shadow.lname;			    \
vmx               417 arch/x86/kvm/vmx/vmx.h static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
vmx               419 arch/x86/kvm/vmx/vmx.h 	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
vmx               421 arch/x86/kvm/vmx/vmx.h static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
vmx               423 arch/x86/kvm/vmx/vmx.h 	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
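
The vmx.h fragment above is the BUILD_CONTROLS_SHADOW-style macro that generates the pin_controls_set()/exec_controls_get()/secondary_exec_controls_setbit() accessors used throughout the vmx.c hits earlier in this listing. A self-contained sketch of the technique for one control word; the VMCS field encoding is architectural, vmcs_write32() is stubbed here:

	#include <stdint.h>
	#include <stdio.h>

	static void vmcs_write32(uint32_t field, uint32_t val)
	{
		printf("VMWRITE field %#x <- %#x\n", field, val);
	}

	struct loaded_vmcs_shadow { uint32_t pin; };
	static struct loaded_vmcs_shadow shadow;

	#define PIN_BASED_VM_EXEC_CONTROL 0x4000 /* VMCS encoding */

	static void pin_controls_set(uint32_t val)
	{
		if (shadow.pin != val) {          /* skip redundant VMWRITEs */
			vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, val);
			shadow.pin = val;
		}
	}

	static uint32_t pin_controls_get(void) { return shadow.pin; }

	int main(void)
	{
		pin_controls_set(pin_controls_get() | 0x1); /* setbit pattern */
		pin_controls_set(pin_controls_get() | 0x1); /* cached: no VMWRITE */
		return 0;
	}
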
vmx               431 arch/x86/kvm/vmx/vmx.h static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
vmx               433 arch/x86/kvm/vmx/vmx.h 	vmx->segment_cache.bitmask = 0;
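
vmx_segment_cache_clear() resets a validity bitmask consulted by the vmx_read_guest_seg_*() helpers seen around vmx.c:3084-3096, so repeated VMREADs of unchanged segment fields can be skipped. A sketch of the caching idea; field layout and names are invented:

	#include <stdint.h>
	#include <stdio.h>

	enum { SEG_FIELD_SEL, SEG_FIELD_BASE, SEG_FIELD_NR };

	static struct {
		uint32_t bitmask;               /* which cached fields are valid */
		uint64_t field[SEG_FIELD_NR];
	} cache;

	static uint64_t vmread(int f) { printf("VMREAD %d\n", f); return 42; }

	static uint64_t read_seg_field(int f)
	{
		if (!(cache.bitmask & (1u << f))) { /* miss: VMREAD once */
			cache.field[f] = vmread(f);
			cache.bitmask |= 1u << f;
		}
		return cache.field[f];
	}

	int main(void)
	{
		read_seg_field(SEG_FIELD_BASE);
		read_seg_field(SEG_FIELD_BASE); /* served from cache */
		cache.bitmask = 0;              /* the _clear() above */
		read_seg_field(SEG_FIELD_BASE); /* VMREAD again */
		return 0;
	}
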
vmx               458 arch/x86/kvm/vmx/vmx.h u32 vmx_exec_control(struct vcpu_vmx *vmx);
vmx               459 arch/x86/kvm/vmx/vmx.h u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);
vmx               509 arch/x86/kvm/vmx/vmx.h static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
vmx               511 arch/x86/kvm/vmx/vmx.h 	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
vmx               512 arch/x86/kvm/vmx/vmx.h 	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
vmx               515 arch/x86/kvm/vmx/vmx.h static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
vmx               517 arch/x86/kvm/vmx/vmx.h 	return vmx->secondary_exec_control &
vmx               420 tools/arch/x86/include/uapi/asm/kvm.h 		struct kvm_vmx_nested_state_hdr vmx;
vmx               432 tools/arch/x86/include/uapi/asm/kvm.h 		struct kvm_vmx_nested_state_data vmx[0];
vmx               579 tools/testing/selftests/kvm/include/x86_64/vmx.h bool prepare_for_vmx_operation(struct vmx_pages *vmx);
vmx               580 tools/testing/selftests/kvm/include/x86_64/vmx.h void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
vmx               581 tools/testing/selftests/kvm/include/x86_64/vmx.h bool load_vmcs(struct vmx_pages *vmx);
vmx               585 tools/testing/selftests/kvm/include/x86_64/vmx.h void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               587 tools/testing/selftests/kvm/include/x86_64/vmx.h void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               590 tools/testing/selftests/kvm/include/x86_64/vmx.h void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               592 tools/testing/selftests/kvm/include/x86_64/vmx.h void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx                81 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
vmx                84 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx                85 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
vmx                86 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
vmx                89 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx                90 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
vmx                91 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
vmx                94 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx                95 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
vmx                96 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
vmx                97 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	memset(vmx->msr_hva, 0, getpagesize());
vmx               100 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx               101 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
vmx               102 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
vmx               105 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx               106 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
vmx               107 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
vmx               108 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	memset(vmx->vmread_hva, 0, getpagesize());
vmx               110 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx               111 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
vmx               112 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
vmx               113 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	memset(vmx->vmwrite_hva, 0, getpagesize());
vmx               116 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
vmx               118 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
vmx               119 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
vmx               122 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
vmx               124 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->enlightened_vmcs_hva =
vmx               125 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
vmx               126 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->enlightened_vmcs_gpa =
vmx               127 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
vmx               130 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	return vmx;
vmx               133 tools/testing/selftests/kvm/lib/x86_64/vmx.c bool prepare_for_vmx_operation(struct vmx_pages *vmx)
vmx               170 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	*(uint32_t *)(vmx->vmxon) = vmcs_revision();
vmx               171 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	if (vmxon(vmx->vmxon_gpa))
vmx               177 tools/testing/selftests/kvm/lib/x86_64/vmx.c bool load_vmcs(struct vmx_pages *vmx)
vmx               181 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		*(uint32_t *)(vmx->vmcs) = vmcs_revision();
vmx               182 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		if (vmclear(vmx->vmcs_gpa))
vmx               185 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		if (vmptrld(vmx->vmcs_gpa))
vmx               189 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		*(uint32_t *)(vmx->shadow_vmcs) =
vmx               191 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		if (vmclear(vmx->shadow_vmcs_gpa))
vmx               194 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
vmx               195 tools/testing/selftests/kvm/lib/x86_64/vmx.c 				  vmx->enlightened_vmcs))
vmx               206 tools/testing/selftests/kvm/lib/x86_64/vmx.c static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
vmx               215 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	if (vmx->eptp_gpa) {
vmx               221 tools/testing/selftests/kvm/lib/x86_64/vmx.c 			.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
vmx               256 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmwrite(MSR_BITMAP, vmx->msr_gpa);
vmx               257 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
vmx               258 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
vmx               375 tools/testing/selftests/kvm/lib/x86_64/vmx.c void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
vmx               377 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	init_vmcs_control_fields(vmx);
vmx               392 tools/testing/selftests/kvm/lib/x86_64/vmx.c void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               424 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	pml4e = vmx->eptp_hva;
vmx               491 tools/testing/selftests/kvm/lib/x86_64/vmx.c void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               502 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
vmx               511 tools/testing/selftests/kvm/lib/x86_64/vmx.c void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               525 tools/testing/selftests/kvm/lib/x86_64/vmx.c 		nested_map(vmx, vm,
vmx               533 tools/testing/selftests/kvm/lib/x86_64/vmx.c void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
vmx               536 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx               537 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx               538 tools/testing/selftests/kvm/lib/x86_64/vmx.c 	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
vmx                50 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c void l1_guest_code(struct vmx_pages *vmx)
vmx                55 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	GUEST_ASSERT(vmx->vmcs_gpa);
vmx                56 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
vmx                57 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	GUEST_ASSERT(load_vmcs(vmx));
vmx                59 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	prepare_vmcs(vmx, l2_guest_code,
vmx                72 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	struct vmx_pages *vmx;
vmx                86 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
vmx               111 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	prepare_eptp(vmx, vm, 0);
vmx               112 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	nested_map_memslot(vmx, vm, 0, 0);
vmx               113 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
vmx               114 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c 	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
vmx                85 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = 0x1000;
vmx                86 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmcs12_pa = 0x2000;
vmx                87 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.smm.flags = 0;
vmx               116 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = -1ull;
vmx               119 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmcs12_pa = -1ull;
vmx               136 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = -1ull;
vmx               137 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmcs12_pa = -1ull;
vmx               148 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.smm.flags = 1;
vmx               153 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = -1ull;
vmx               159 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = 1;
vmx               169 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
vmx               178 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
vmx               185 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
vmx               195 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = 0;
vmx               196 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmcs12_pa = 0;
vmx               209 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmxon_pa = -1ull;
vmx               210 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	state->hdr.vmx.vmcs12_pa = -1ull;
vmx               217 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
vmx               218 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c 	TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
vmx                99 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	unsigned long vmx[VMX_MAX + 2][2];
vmx               104 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	FAIL_IF(show_vmx(child, vmx));
vmx               105 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	FAIL_IF(validate_vmx(vmx, fp_load));
vmx               108 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	FAIL_IF(show_vmx_ckpt(child, vmx));
vmx               109 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
vmx               112 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	memset(vmx, 0, sizeof(vmx));
vmx               114 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
vmx               117 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 	FAIL_IF(write_vmx_ckpt(child, vmx));
vmx                87 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	unsigned long vmx[VMX_MAX + 2][2];
vmx                92 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	FAIL_IF(show_vmx(child, vmx));
vmx                93 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	FAIL_IF(validate_vmx(vmx, fp_load));
vmx                96 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	FAIL_IF(show_vmx_ckpt(child, vmx));
vmx                97 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
vmx                99 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	memset(vmx, 0, sizeof(vmx));
vmx               101 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
vmx               104 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 	FAIL_IF(write_vmx_ckpt(child, vmx));
vmx                40 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	unsigned long vmx[VMX_MAX + 2][2];
vmx                45 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	FAIL_IF(show_vmx(child, vmx));
vmx                46 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	FAIL_IF(validate_vmx(vmx, fp_load));
vmx                49 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	memset(vmx, 0, sizeof(vmx));
vmx                50 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	load_vsx_vmx(fp_load_new, vsx, vmx);
vmx                53 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c 	FAIL_IF(write_vmx(child, vmx));
vmx                31 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h int validate_vmx(unsigned long vmx[][2], unsigned long *load)
vmx                37 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 		if ((vmx[i][0] != load[64 + 2 * i]) ||
vmx                38 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 				(vmx[i][1] != load[65 + 2 * i])) {
vmx                40 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 					i, vmx[i][0], 64 + 2 * i,
vmx                43 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 					i, vmx[i][1], 65 + 2 * i,
vmx                51 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 		if ((vmx[i][0] != load[65 + 2 * i]) ||
vmx                52 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 				(vmx[i][1] != load[64 + 2 * i])) {
vmx                54 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 					i, vmx[i][0], 65 + 2 * i,
vmx                57 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 					i, vmx[i][1], 64 + 2 * i,
vmx               109 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 		unsigned long vmx[][2])
vmx               117 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 		vmx[i][0] = load[64 + 2 * i];
vmx               118 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h 		vmx[i][1] = load[65 + 2 * i];
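
The powerpc ptrace hits above all share one register layout: the flat load[] image carries the FP/VSX state in its first 64 entries (an inference from the offsets, not quoted), and VMX vector i occupies the pair load[64 + 2*i] / load[65 + 2*i], with the halves swapped on the other-endian branch of validate_vmx(). A standalone sketch of the mapping load_vsx_vmx() performs:

	#include <stdio.h>

	#define VEC_MAX 12 /* illustrative; the tests use VMX_MAX */

	int main(void)
	{
		unsigned long load[64 + 2 * VEC_MAX] = { 0 };
		unsigned long vmx[VEC_MAX][2];
		int i;

		for (i = 0; i < 2 * VEC_MAX; i++)
			load[64 + i] = 0x1000 + i; /* fake register contents */

		for (i = 0; i < VEC_MAX; i++) {
			vmx[i][0] = load[64 + 2 * i];
			vmx[i][1] = load[65 + 2 * i];
		}
		printf("vmx[1] = {%#lx, %#lx}\n", vmx[1][0], vmx[1][1]);
		return 0;
	}
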
vmx               529 tools/testing/selftests/powerpc/ptrace/ptrace.h int show_vmx(pid_t child, unsigned long vmx[][2])
vmx               533 tools/testing/selftests/powerpc/ptrace/ptrace.h 	ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx);
vmx               541 tools/testing/selftests/powerpc/ptrace/ptrace.h int show_vmx_ckpt(pid_t child, unsigned long vmx[][2])
vmx               554 tools/testing/selftests/powerpc/ptrace/ptrace.h 	memcpy(vmx, regs, sizeof(regs));
vmx               559 tools/testing/selftests/powerpc/ptrace/ptrace.h int write_vmx(pid_t child, unsigned long vmx[][2])
vmx               563 tools/testing/selftests/powerpc/ptrace/ptrace.h 	ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx);
vmx               571 tools/testing/selftests/powerpc/ptrace/ptrace.h int write_vmx_ckpt(pid_t child, unsigned long vmx[][2])
vmx               577 tools/testing/selftests/powerpc/ptrace/ptrace.h 	memcpy(regs, vmx, sizeof(regs));
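
show_vmx()/write_vmx() above are thin wrappers over the PTRACE_GETVRREGS/PTRACE_SETVRREGS requests (the _ckpt variants go through the checkpointed register sets instead). A minimal round-trip sketch in the same shape, compilable on powerpc where those requests are defined; the VMX_MAX value is an assumption:

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	#define VMX_MAX 34 /* assumption: 32 VRs + VSCR + VRSAVE */

	/* read the tracee's vector registers, tweak one, write them back */
	int roundtrip_vmx(pid_t child)
	{
		unsigned long vmx[VMX_MAX + 2][2];

		if (ptrace(PTRACE_GETVRREGS, child, 0, vmx))
			return -1;
		vmx[0][0] ^= 0x1;       /* flip one bit of VR0's first half */
		return ptrace(PTRACE_SETVRREGS, child, 0, vmx) ? -1 : 0;
	}
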