kvm_state        1199 arch/x86/include/asm/kvm_host.h 				struct kvm_nested_state *kvm_state);
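The single hit in kvm_host.h is the declaration of the kvm_x86_ops->set_nested_state hook, whose last argument is the userspace-visible struct kvm_nested_state. As orientation for the call sites below, here is an approximate sketch of that structure, reconstructed only from the fields referenced in this listing (flags, format, size, hdr.vmx.vmxon_pa, hdr.vmx.vmcs12_pa, hdr.vmx.smm.flags); the authoritative definition lives in include/uapi/linux/kvm.h and also carries an SVM header and a trailing data union omitted here.

#include <linux/types.h>

/* Approximate sketch, not a copy of the UAPI header. */
struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;			/* GPA of the VMXON region, -1ull if none */
	__u64 vmcs12_pa;		/* GPA of the current vmcs12, -1ull if none */
	struct {
		__u16 flags;		/* KVM_STATE_NESTED_SMM_{VMXON,GUEST_MODE} */
	} smm;
};

struct kvm_nested_state {
	__u16 flags;			/* KVM_STATE_NESTED_{GUEST_MODE,RUN_PENDING,EVMCS} */
	__u16 format;			/* KVM_STATE_NESTED_FORMAT_VMX in this listing */
	__u32 size;			/* header plus any vmcs12 data that follows */
	union {
		struct kvm_vmx_nested_state_hdr vmx;
		__u8 pad[120];		/* header padded to a fixed size */
	} hdr;
	/* vmcs12 / shadow vmcs12 blobs follow the header when present. */
};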
kvm_state        5512 arch/x86/kvm/vmx/nested.c 	struct kvm_nested_state kvm_state = {
kvm_state        5515 arch/x86/kvm/vmx/nested.c 		.size = sizeof(kvm_state),
kvm_state        5523 arch/x86/kvm/vmx/nested.c 		return kvm_state.size + sizeof(*user_vmx_nested_state);
kvm_state        5530 arch/x86/kvm/vmx/nested.c 		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
kvm_state        5531 arch/x86/kvm/vmx/nested.c 		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
kvm_state        5534 arch/x86/kvm/vmx/nested.c 			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
kvm_state        5537 arch/x86/kvm/vmx/nested.c 				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
kvm_state        5542 arch/x86/kvm/vmx/nested.c 				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
kvm_state        5546 arch/x86/kvm/vmx/nested.c 			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
kvm_state        5549 arch/x86/kvm/vmx/nested.c 			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
kvm_state        5552 arch/x86/kvm/vmx/nested.c 			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
kvm_state        5555 arch/x86/kvm/vmx/nested.c 				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
kvm_state        5559 arch/x86/kvm/vmx/nested.c 	if (user_data_size < kvm_state.size)
kvm_state        5562 arch/x86/kvm/vmx/nested.c 	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
kvm_state        5603 arch/x86/kvm/vmx/nested.c 	return kvm_state.size;
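vmx_get_nested_state() builds this header on the stack, grows kvm_state.size by the vmcs12 (and, when one is in use, the shadow vmcs12), and, if the caller's buffer (user_data_size) is too small, skips the copy_to_user() and simply returns the required size. A minimal userspace sketch of driving the corresponding KVM_GET_NESTED_STATE vCPU ioctl, on the assumption (matching the usual KVM convention) that the caller advertises its buffer size in the size field and that -E2BIG reports the size actually needed:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: snapshot the nested-VMX state of one vCPU.
 * bufsz must cover at least the header; the kernel appends the
 * vmcs12 data beyond it, so a few KiB of slack is typically enough.
 */
static struct kvm_nested_state *save_nested_state(int vcpu_fd, size_t bufsz)
{
	struct kvm_nested_state *state = calloc(1, bufsz);

	if (!state)
		return NULL;

	state->size = bufsz;	/* how much room the kernel may use */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		/* On -E2BIG the kernel reports how much it needed. */
		fprintf(stderr, "KVM_GET_NESTED_STATE: %s (wanted %u bytes)\n",
			strerror(errno), state->size);
		free(state);
		return NULL;
	}
	return state;
}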
kvm_state        5620 arch/x86/kvm/vmx/nested.c 				struct kvm_nested_state *kvm_state)
kvm_state        5629 arch/x86/kvm/vmx/nested.c 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
kvm_state        5632 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
kvm_state        5633 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.smm.flags)
kvm_state        5636 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
kvm_state        5648 arch/x86/kvm/vmx/nested.c 		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
kvm_state        5654 arch/x86/kvm/vmx/nested.c 		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
kvm_state        5658 arch/x86/kvm/vmx/nested.c 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
kvm_state        5659 arch/x86/kvm/vmx/nested.c 	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
kvm_state        5662 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.smm.flags &
kvm_state        5672 arch/x86/kvm/vmx/nested.c 		(kvm_state->flags &
kvm_state        5674 arch/x86/kvm/vmx/nested.c 		: kvm_state->hdr.vmx.smm.flags)
kvm_state        5677 arch/x86/kvm/vmx/nested.c 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
kvm_state        5678 arch/x86/kvm/vmx/nested.c 	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
kvm_state        5681 arch/x86/kvm/vmx/nested.c 	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
kvm_state        5687 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
kvm_state        5690 arch/x86/kvm/vmx/nested.c 	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
kvm_state        5696 arch/x86/kvm/vmx/nested.c 	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
kvm_state        5699 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
kvm_state        5700 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
kvm_state        5701 arch/x86/kvm/vmx/nested.c 		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
kvm_state        5704 arch/x86/kvm/vmx/nested.c 		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
kvm_state        5705 arch/x86/kvm/vmx/nested.c 	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
kvm_state        5715 arch/x86/kvm/vmx/nested.c 	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
kvm_state        5719 arch/x86/kvm/vmx/nested.c 		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
kvm_state        5730 arch/x86/kvm/vmx/nested.c 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
kvm_state        5734 arch/x86/kvm/vmx/nested.c 		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
kvm_state        5741 arch/x86/kvm/vmx/nested.c 		if (kvm_state->size <
kvm_state        5742 arch/x86/kvm/vmx/nested.c 		    sizeof(*kvm_state) +
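On the restore side, vmx_set_nested_state() is strict about the header before it touches any vmcs12 data: the format must be KVM_STATE_NESTED_FORMAT_VMX; vmxon_pa == -1ull means "no nested state" and then forbids SMM flags and a vmcs12_pa; the SMM flags must be consistent with each other and with the vCPU's SMM state; and size must cover the vmcs12 (plus the shadow vmcs12 when the vCPU is in guest mode with one). A minimal userspace sketch of the simplest payload this accepts, the "no nested state" case; field usage mirrors the checks in the listing and is meant as an illustration only:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: declare that the vCPU has no nested state at
 * all (the vmxon_pa == -1ull path in vmx_set_nested_state above).
 */
static int clear_nested_state(int vcpu_fd)
{
	struct kvm_nested_state state = {
		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(state),
		.hdr.vmx.vmxon_pa = -1ull,
		.hdr.vmx.vmcs12_pa = -1ull,
	};

	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state);
}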
kvm_state        4495 arch/x86/kvm/x86.c 		struct kvm_nested_state kvm_state;
kvm_state        4503 arch/x86/kvm/x86.c 		if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
kvm_state        4507 arch/x86/kvm/x86.c 		if (kvm_state.size < sizeof(kvm_state))
kvm_state        4510 arch/x86/kvm/x86.c 		if (kvm_state.flags &
kvm_state        4516 arch/x86/kvm/x86.c 		if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
kvm_state        4517 arch/x86/kvm/x86.c 		    && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
kvm_state        4521 arch/x86/kvm/x86.c 		r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
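Before handing the buffer to the vendor hook, the common KVM_SET_NESTED_STATE handler in x86.c performs a few cheap checks of its own: the claimed size must at least cover the header, only the handful of known KVM_STATE_NESTED_* flags may be set, and KVM_STATE_NESTED_RUN_PENDING only makes sense together with KVM_STATE_NESTED_GUEST_MODE. A small, hypothetical userspace mirror of those invariants, usable as a pre-flight check when hand-building state to feed back into KVM:

#include <stdbool.h>
#include <linux/kvm.h>

/*
 * Hypothetical pre-flight check mirroring the header validation in the
 * KVM_SET_NESTED_STATE handler: size covers the header, no unknown
 * flags, and a pending nested VM entry implies guest mode.
 */
static bool nested_state_header_plausible(const struct kvm_nested_state *s)
{
	const __u16 known = KVM_STATE_NESTED_GUEST_MODE |
			    KVM_STATE_NESTED_RUN_PENDING |
			    KVM_STATE_NESTED_EVMCS;

	if (s->size < sizeof(*s))
		return false;
	if (s->flags & ~known)
		return false;
	if ((s->flags & KVM_STATE_NESTED_RUN_PENDING) &&
	    !(s->flags & KVM_STATE_NESTED_GUEST_MODE))
		return false;
	return true;
}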