new_cr3 1459 arch/x86/include/asm/kvm_host.h void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
new_cr3 4362 arch/x86/kvm/mmu.c static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
new_cr3 4375 arch/x86/kvm/mmu.c if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
new_cr3 4387 arch/x86/kvm/mmu.c static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
new_cr3 4400 arch/x86/kvm/mmu.c if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
new_cr3 4403 arch/x86/kvm/mmu.c if (cached_root_available(vcpu, new_cr3, new_role)) {
new_cr3 4436 arch/x86/kvm/mmu.c static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
new_cr3 4440 arch/x86/kvm/mmu.c if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
new_cr3 4445 arch/x86/kvm/mmu.c void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
new_cr3 4447 arch/x86/kvm/mmu.c __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
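
Read together, the hits trace a single call chain: kvm_mmu_new_cr3() computes the new root page role via kvm_mmu_calc_root_page_role() and calls __kvm_mmu_new_cr3(), which first attempts fast_cr3_switch(); that in turn validates the new CR3 with mmu_check_root() and asks cached_root_available() whether a previously used root with the same CR3 and role can be reused, avoiding a full root reload. Below is a minimal, self-contained toy model of that lookup pattern, NOT the kernel's actual code: the struct layouts, cache depth, role type, and function bodies are all assumptions invented for illustration; only the shape of the chain and the CR3/role/VALID_PAGE-style match in the cache scan follow the fragments listed above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;   /* guest-physical address, as in the kernel */
typedef uint64_t hpa_t;   /* host-physical address */

#define INVALID_PAGE   ((hpa_t)0)
#define NR_CACHED_ROOTS 3           /* arbitrary cache depth for the model */

/* Toy stand-ins for the kernel's root cache entry and MMU state. */
struct cached_root {
	gpa_t    cr3;
	hpa_t    hpa;               /* INVALID_PAGE means slot is empty */
	unsigned role;              /* stand-in for kvm_mmu_page_role */
};

struct toy_mmu {
	struct cached_root roots[NR_CACHED_ROOTS];
	gpa_t    cur_cr3;
	unsigned cur_role;
};

/* Mirrors the shape of cached_root_available(): scan cached roots for
 * one whose CR3 and role match and whose root page is still valid. */
static bool cached_root_available(struct toy_mmu *mmu, gpa_t new_cr3,
				  unsigned new_role)
{
	for (int i = 0; i < NR_CACHED_ROOTS; i++) {
		struct cached_root root = mmu->roots[i];

		if (new_cr3 == root.cr3 && root.hpa != INVALID_PAGE &&
		    new_role == root.role)
			return true;
	}
	return false;
}

/* Mirrors fast_cr3_switch(): succeed only if a cached root matches, in
 * which case the switch is cheap and no page tables are rebuilt. */
static bool fast_cr3_switch(struct toy_mmu *mmu, gpa_t new_cr3,
			    unsigned new_role)
{
	if (!cached_root_available(mmu, new_cr3, new_role))
		return false;

	mmu->cur_cr3  = new_cr3;    /* reuse the cached root */
	mmu->cur_role = new_role;
	return true;
}

/* Mirrors __kvm_mmu_new_cr3(): try the fast path, else fall back. */
static void toy_mmu_new_cr3(struct toy_mmu *mmu, gpa_t new_cr3,
			    unsigned new_role)
{
	if (!fast_cr3_switch(mmu, new_cr3, new_role))
		printf("slow path: full root reload for cr3=%#llx\n",
		       (unsigned long long)new_cr3);
	else
		printf("fast path: reused cached root for cr3=%#llx\n",
		       (unsigned long long)new_cr3);
}

int main(void)
{
	struct toy_mmu mmu = {
		.roots = { { .cr3 = 0x1000, .hpa = 0xabc000, .role = 1 } },
	};

	toy_mmu_new_cr3(&mmu, 0x1000, 1);   /* hit: fast switch */
	toy_mmu_new_cr3(&mmu, 0x2000, 1);   /* miss: full reload */
	return 0;
}

The point of the pattern, visible even in this toy, is that a root is reusable only when both the CR3 value and the page role match: the role encodes mode bits that change the page-table format, so a CR3 match alone is not sufficient, which is why cached_root_available() in the listing takes new_role as well as new_cr3.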