walk_mmu          602 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu *walk_mmu;
walk_mmu           86 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.walk_mmu->pdptrs[index];
walk_mmu         5798 arch/x86/kvm/mmu.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
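
Taken together, the first three hits give the shape of the design: kvm_host.h declares walk_mmu as a pointer in kvm_vcpu_arch, kvm_cache_regs.h reads the cached PAE PDPTEs through it, and mmu.c points it at root_mmu for ordinary non-nested operation. Below is a minimal, self-contained C model of that arrangement; the struct layouts and names are invented for illustration, not copied from the kernel.

/* walk_mmu_model.c - toy model of the walk_mmu selector; not kernel code */
#include <stdio.h>

struct mmu_model {
	const char *name;
	unsigned long long pdptrs[4];	/* PAE PDPTE cache, cf. kvm_cache_regs.h:86 */
};

struct vcpu_arch_model {
	struct mmu_model root_mmu;	/* context for a non-nested guest (or L1) */
	struct mmu_model nested_mmu;	/* context for L2 while running nested */
	struct mmu_model *walk_mmu;	/* whichever context guest walks should use */
};

/* mirrors the mmu.c:5798 hit: by default, walks go through root_mmu */
static void init_walk_mmu(struct vcpu_arch_model *arch)
{
	arch->walk_mmu = &arch->root_mmu;
}

int main(void)
{
	struct vcpu_arch_model arch = {
		.root_mmu   = { .name = "root_mmu"   },
		.nested_mmu = { .name = "nested_mmu" },
	};

	init_walk_mmu(&arch);
	printf("guest walks use %s\n", arch.walk_mmu->name);
	return 0;
}

Every consumer in the hits below dereferences the pointer rather than naming root_mmu or nested_mmu directly, which is what lets nested entry and exit retarget all of them with a single assignment.
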
walk_mmu          492 arch/x86/kvm/paging_tmpl.h 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
walk_mmu         2416 arch/x86/kvm/svm.c 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
walk_mmu         3040 arch/x86/kvm/svm.c 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
walk_mmu         3046 arch/x86/kvm/svm.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
walk_mmu          361 arch/x86/kvm/vmx/nested.c 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
walk_mmu          367 arch/x86/kvm/vmx/nested.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
walk_mmu         1004 arch/x86/kvm/vmx/nested.c 			if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
walk_mmu         2442 arch/x86/kvm/vmx/nested.c 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
walk_mmu         3839 arch/x86/kvm/vmx/nested.c 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
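
The svm.c and vmx/nested.c hits show the same two-step pattern at nested transitions: entering L2 repoints walk_mmu at nested_mmu (and on VMX, when L1 intercepts page faults, also swaps walk_mmu->inject_page_fault for vmx_inject_page_fault_nested so faults can be reflected to L1), while nested VM-exit restores root_mmu and kvm_inject_page_fault. A hedged sketch of that switch in the same toy-model style as above; the function names here are invented.

/* nested_switch_model.c - toy model of the walk_mmu flip; not kernel code */
#include <stdio.h>

struct mmu_model {
	const char *name;
	void (*inject_page_fault)(const char *why);
};

struct vcpu_arch_model {
	struct mmu_model root_mmu, nested_mmu;
	struct mmu_model *walk_mmu;
};

/* stand-ins for kvm_inject_page_fault and vmx_inject_page_fault_nested */
static void plain_inject(const char *why)
{
	printf("deliver #PF to the running guest: %s\n", why);
}

static void nested_inject(const char *why)
{
	printf("reflect #PF to L1 as a VM-exit: %s\n", why);
}

/* models the entry side (svm.c:3040, vmx/nested.c:361 and 2442) */
static void nested_mmu_enter(struct vcpu_arch_model *arch)
{
	arch->walk_mmu = &arch->nested_mmu;
	arch->walk_mmu->inject_page_fault = nested_inject;
}

/* models the exit side (svm.c:3046, vmx/nested.c:367 and 3839) */
static void nested_mmu_exit(struct vcpu_arch_model *arch)
{
	arch->walk_mmu = &arch->root_mmu;
	arch->walk_mmu->inject_page_fault = plain_inject;
}

int main(void)
{
	struct vcpu_arch_model arch = {
		.root_mmu   = { "root_mmu",   plain_inject },
		.nested_mmu = { "nested_mmu", plain_inject },
	};

	arch.walk_mmu = &arch.root_mmu;
	nested_mmu_enter(&arch);
	arch.walk_mmu->inject_page_fault("fault while walking L2's tables");
	nested_mmu_exit(&arch);
	arch.walk_mmu->inject_page_fault("ordinary guest fault");
	return 0;
}
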
walk_mmu         5042 arch/x86/kvm/vmx/nested.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
walk_mmu         2851 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
walk_mmu         2867 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
walk_mmu          685 arch/x86/kvm/x86.c 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
walk_mmu          735 arch/x86/kvm/x86.c 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
walk_mmu          754 arch/x86/kvm/x86.c 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
walk_mmu          793 arch/x86/kvm/x86.c 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
walk_mmu          985 arch/x86/kvm/x86.c 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
walk_mmu         1036 arch/x86/kvm/x86.c 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
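
The x86.c hits from 685 through 1036 are all PAE PDPTE bookkeeping: load_pdptrs() reads the four PDPTEs through walk_mmu and caches them in walk_mmu->pdptrs, pdptrs_changed() memcmp()s a fresh read against that cache, and the CR0/CR3/CR4 write paths reload the cache whenever paging state changes under PAE. A self-contained sketch of that flow follows; the reserved-bit mask and the flat-array stand-in for guest memory are simplifications, not the kernel's actual checks.

/* pdptrs_model.c - toy model of PDPTE caching keyed off the walking MMU */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PDPTE_PRESENT	0x1ULL
#define PDPTE_RSVD	(0x1ULL << 63)	/* illustrative reserved bit only */

struct mmu_model {
	uint64_t pdptrs[4];
};

/* models load_pdptrs(vcpu, walk_mmu, cr3): fetch the four PDPTEs from the
 * CR3-indicated base, reject present entries with reserved bits set, then
 * cache them; guest memory is faked with a flat array here */
static int load_pdptrs_model(struct mmu_model *mmu, const uint64_t *guest_pd)
{
	uint64_t pdpte[4];	/* mirrors the local array in the x86.c:735 hit */
	int i;

	memcpy(pdpte, guest_pd, sizeof(pdpte));
	for (i = 0; i < 4; i++) {
		if ((pdpte[i] & PDPTE_PRESENT) && (pdpte[i] & PDPTE_RSVD))
			return 0;	/* invalid PDPTE: the CR write fails */
	}
	memcpy(mmu->pdptrs, pdpte, sizeof(pdpte));
	return 1;
}

/* models pdptrs_changed() (x86.c:754): fresh read vs. cached copy */
static int pdptrs_changed_model(const struct mmu_model *mmu,
				const uint64_t *guest_pd)
{
	return memcmp(guest_pd, mmu->pdptrs, sizeof(mmu->pdptrs)) != 0;
}

int main(void)
{
	struct mmu_model walk = { { 0 } };
	uint64_t pd[4] = { 0x1001, 0x2001, 0x3001, 0x4001 };

	printf("load ok: %d\n", load_pdptrs_model(&walk, pd));
	pd[2] = 0x5001;		/* guest rewrote a PDPTE behind KVM's back */
	printf("changed: %d\n", pdptrs_changed_model(&walk, pd));
	return 0;
}
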
walk_mmu         5360 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
walk_mmu         5368 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
walk_mmu         5376 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
walk_mmu         5383 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
walk_mmu         5394 arch/x86/kvm/x86.c 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
walk_mmu         5428 arch/x86/kvm/x86.c 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
walk_mmu         5492 arch/x86/kvm/x86.c 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
walk_mmu         5593 arch/x86/kvm/x86.c 	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
walk_mmu         5601 arch/x86/kvm/x86.c 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
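
The block of hits from x86.c:5360 to 5601 shows the other big consumer: every guest-virtual-to-guest-physical translation KVM performs on the guest's behalf goes through walk_mmu->gva_to_gpa(), with the intent encoded in the access mask (the 5428 hit ORs in PFERR_FETCH_MASK for instruction fetches; the read and write helpers add their own bits). A compact model of that dispatch; the PFERR_* values mirror x86's page-fault error-code bit positions, and the identity translator is a stand-in for a real page walk.

/* gva_model.c - toy model of gva_to_gpa dispatch through walk_mmu */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

#define PFERR_WRITE_MASK	(1ULL << 1)
#define PFERR_USER_MASK		(1ULL << 2)
#define PFERR_FETCH_MASK	(1ULL << 4)

struct mmu_model {
	/* each context supplies its own translator; the nested one would
	 * walk L2's page tables through L1's mappings */
	gpa_t (*gva_to_gpa)(gva_t gva, uint64_t access);
};

static gpa_t identity_gva_to_gpa(gva_t gva, uint64_t access)
{
	(void)access;		/* a real walker enforces access rights here */
	return (gpa_t)gva;	/* identity map stands in for the page walk */
}

/* models a fetch helper: same dispatch with the fetch bit ORed in,
 * cf. the x86.c:5428 hit */
static gpa_t gva_to_gpa_fetch(struct mmu_model *walk_mmu, gva_t gva,
			      uint64_t access)
{
	return walk_mmu->gva_to_gpa(gva, access | PFERR_FETCH_MASK);
}

int main(void)
{
	struct mmu_model walk = { identity_gva_to_gpa };
	gpa_t gpa = gva_to_gpa_fetch(&walk, 0x1000, PFERR_USER_MASK);

	printf("gpa = 0x%llx\n", (unsigned long long)gpa);
	return 0;
}
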
walk_mmu         8911 arch/x86/kvm/x86.c 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
walk_mmu          124 arch/x86/kvm/x86.h 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
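
The last hit is a closing detail worth naming: KVM keeps no separate "nested?" boolean, because the walk_mmu pointer itself is the flag, and mmu_is_nested() is just a pointer comparison. The paging_tmpl.h hit at 492 plays the same identity trick in reverse, classifying a fault as a nested page fault exactly when the walker's mmu is not walk_mmu. In terms of the vcpu_arch_model sketched earlier (illustration only, not a standalone program):

/* models x86.h:124's mmu_is_nested(): the pointer comparison is the flag */
static int mmu_is_nested_model(const struct vcpu_arch_model *arch)
{
	return arch->walk_mmu == &arch->nested_mmu;
}
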