kvm_mmu           580 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu *mmu;
kvm_mmu           583 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu root_mmu;
kvm_mmu           586 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu guest_mmu;
kvm_mmu           596 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu nested_mmu;
kvm_mmu           602 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu *walk_mmu;
kvm_mmu          1284 arch/x86/include/asm/kvm_host.h int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
kvm_mmu          1405 arch/x86/include/asm/kvm_host.h int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
kvm_mmu          1438 arch/x86/include/asm/kvm_host.h void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
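
The kvm_host.h hits above are the per-vCPU MMU state in struct kvm_vcpu_arch, plus three prototypes that take an explicit struct kvm_mmu * so callers can pick which context to operate on. Below is an annotated sketch of how the five members appear to relate; the comments are paraphrased from the kvm_host.h of this era and from how the nested code switches contexts, not quoted verbatim.

    struct kvm_vcpu_arch {
        /* ... */

        /*
         * Active fault-handling context.  Normally points at root_mmu;
         * the nested EPT/NPT code switches it to guest_mmu while L2
         * runs so that L1's roots are not thrown away.
         */
        struct kvm_mmu *mmu;

        /* Context for the L1 guest (the normal, non-nested case). */
        struct kvm_mmu root_mmu;

        /* Context used while an L2 guest runs with nested EPT/NPT. */
        struct kvm_mmu guest_mmu;

        /*
         * Paging state of the L2 guest, used only to walk its page
         * tables (gva -> gpa), never to handle faults directly.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Context used for gva_to_gpa translations: &root_mmu normally,
         * &nested_mmu while a nested guest runs with nested paging.
         */
        struct kvm_mmu *walk_mmu;

        /* ... */
    };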
kvm_mmu          3717 arch/x86/kvm/mmu.c void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
kvm_mmu          4016 arch/x86/kvm/mmu.c static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
kvm_mmu          4021 arch/x86/kvm/mmu.c static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
kvm_mmu          4341 arch/x86/kvm/mmu.c 				   struct kvm_mmu *context)
kvm_mmu          4367 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
kvm_mmu          4391 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
kvm_mmu          4480 arch/x86/kvm/mmu.c static inline bool is_last_gpte(struct kvm_mmu *mmu,
kvm_mmu          4604 arch/x86/kvm/mmu.c 				  struct kvm_mmu *context)
kvm_mmu          4651 arch/x86/kvm/mmu.c 		struct kvm_mmu *context, bool execonly)
kvm_mmu          4663 arch/x86/kvm/mmu.c reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
kvm_mmu          4704 arch/x86/kvm/mmu.c 				struct kvm_mmu *context)
kvm_mmu          4737 arch/x86/kvm/mmu.c 				struct kvm_mmu *context, bool execonly)
kvm_mmu          4754 arch/x86/kvm/mmu.c 				      struct kvm_mmu *mmu, bool ept)
kvm_mmu          4849 arch/x86/kvm/mmu.c static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
kvm_mmu          4900 arch/x86/kvm/mmu.c static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
kvm_mmu          4910 arch/x86/kvm/mmu.c 					 struct kvm_mmu *context,
kvm_mmu          4932 arch/x86/kvm/mmu.c 				  struct kvm_mmu *context)
kvm_mmu          4941 arch/x86/kvm/mmu.c 				  struct kvm_mmu *context)
kvm_mmu          4961 arch/x86/kvm/mmu.c 				   struct kvm_mmu *context)
kvm_mmu          5018 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
kvm_mmu          5090 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
kvm_mmu          5144 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
kvm_mmu          5178 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
kvm_mmu          5190 arch/x86/kvm/mmu.c 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
kvm_mmu          5612 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
kvm_mmu          5643 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
kvm_mmu          5758 arch/x86/kvm/mmu.c static void free_mmu_pages(struct kvm_mmu *mmu)
kvm_mmu          5764 arch/x86/kvm/mmu.c static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
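
Most of the mmu.c hits are the per-context setup path: the reset_rsvds_bits_mask*/reset_shadow_zero_bits_mask helpers and update_permission_bitmask(), update_pkru_bitmask() and update_last_nonleaf_level() precompute masks into a struct kvm_mmu that the fault path (is_rsvd_bits_set(), permission_fault()) later consults, while the context-initialization functions in the 5018-5190 range each begin by picking the context they fill in (vcpu->arch.mmu or &vcpu->arch.nested_mmu). A rough sketch of the top-level dispatch, paraphrased from the kvm_init_mmu() of this era (reset_roots handling omitted), not verbatim:

    /* Sketch only; simplified from arch/x86/kvm/mmu.c. */
    void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
    {
        if (mmu_is_nested(vcpu))
            init_kvm_nested_mmu(vcpu);  /* fills &vcpu->arch.nested_mmu (walker)  */
        else if (tdp_enabled)
            init_kvm_tdp_mmu(vcpu);     /* fills vcpu->arch.mmu for TDP (EPT/NPT) */
        else
            init_kvm_softmmu(vcpu);     /* fills vcpu->arch.mmu for shadow paging */
    }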
kvm_mmu            57 arch/x86/kvm/mmu.h reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
kvm_mmu           156 arch/x86/kvm/mmu.h static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
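
permission_fault() in mmu.h is the consumer of the bitmaps that update_permission_bitmask() and update_pkru_bitmask() precompute into each struct kvm_mmu: the page-fault error code indexes a small table so the hot path is a lookup rather than a chain of mode checks. The toy below only illustrates that precompute-then-index idea; it is not the kernel's permission_fault(), whose index calculation and SMAP/PKU handling are more involved.

    #include <stdint.h>

    /* Toy model of a pfec-indexed permission table (illustration only). */
    struct toy_mmu {
        /*
         * permissions[pfec >> 1]: one byte per error-code combination,
         * with bit N set when an access with pte_access == N must fault.
         */
        uint8_t permissions[16];
    };

    static inline int toy_permission_fault(const struct toy_mmu *mmu,
                                           unsigned int pte_access,
                                           unsigned int pfec)
    {
        return (mmu->permissions[(pfec >> 1) & 0xf] >> pte_access) & 1;
    }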
kvm_mmu           104 arch/x86/kvm/paging_tmpl.h static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
kvm_mmu           131 arch/x86/kvm/paging_tmpl.h static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
kvm_mmu           221 arch/x86/kvm/paging_tmpl.h 					     struct kvm_mmu *mmu,
kvm_mmu           297 arch/x86/kvm/paging_tmpl.h 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
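
The paging_tmpl.h hits are template code: mmu.c defines PTTYPE and includes this header several times, and the FNAME() macro gives each instantiation (32-bit, 64-bit and EPT guest page-table formats) its own function prefix, so protect_clean_gpte(), cmpxchg_gpte() and the walkers above exist once per format. Roughly (paraphrased, not the verbatim macros):

    /* In mmu.c, once per guest page-table format: */
    #define PTTYPE 64
    #include "paging_tmpl.h"
    #undef PTTYPE
    /* ...repeated for 32 and PTTYPE_EPT... */

    /* Inside paging_tmpl.h, roughly: */
    #if PTTYPE == 64
        #define FNAME(name) paging64_##name
        #define pt_element_t u64
    #elif PTTYPE == 32
        #define FNAME(name) paging32_##name
        #define pt_element_t u32
    #elif PTTYPE == PTTYPE_EPT
        #define FNAME(name) ept_##name
        #define pt_element_t u64
    #endif
    /* So FNAME(cmpxchg_gpte) becomes paging64_cmpxchg_gpte, and so on. */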
kvm_mmu          5042 arch/x86/kvm/vmx/nested.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
kvm_mmu          2851 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
kvm_mmu          2867 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
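
The nested.c and vmx.c hits fetch vcpu->arch.walk_mmu when they need the context that translates guest-virtual addresses (and that caches the guest's PDPTRs). A minimal sketch of the usual pattern, assuming kernel-internal types and the gva_to_gpa callback that the context-initialization code installs; the helper name here is illustrative, not from the source:

    /* Sketch only: translate a guest-virtual address via the walk context. */
    static gpa_t example_translate_gva(struct kvm_vcpu *vcpu, gva_t gva,
                                       u32 access, struct x86_exception *exception)
    {
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

        /* gva_to_gpa is one of the per-context callbacks set up in mmu.c. */
        return mmu->gva_to_gpa(vcpu, gva, access, exception);
    }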
kvm_mmu           663 arch/x86/kvm/x86.c int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
kvm_mmu           698 arch/x86/kvm/x86.c int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
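
Finally, the x86.c definitions show why these helpers take an explicit mmu argument: load_pdptrs() reads the four PAE page-directory pointers through kvm_read_guest_page_mmu() in the given context and, on success, caches them in that context's pdptrs[]. A sketch of a typical caller, paraphrased from the CR3-load path of this era (the real kvm_set_cr3() performs more checks); the wrapper name is illustrative:

    static int example_load_pae_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
    {
        /* walk_mmu is the context that performs guest page-table walks. */
        if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
            return 1;   /* reject: PDPTEs unreadable or reserved bits set */

        /* The real code also records the new root and requests a reload. */
        return 0;
    }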