Lines Matching refs:mmu
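
Each entry below appears to be cross-reference output: the leading number is the line in the source file, the trailing "in foo()" names the enclosing function, and "argument" marks lines where mmu occurs as a function parameter rather than as vcpu->arch.mmu. The function names all belong to the x86 KVM MMU (arch/x86/kvm/mmu.c, from kernels in which vcpu->arch.mmu was still an embedded struct kvm_mmu rather than a pointer). For orientation, here is a minimal sketch of struct kvm_mmu restricted to the members these references touch; the callback signatures are reconstructed from the call sites below and the grouping comments are my own, so treat it as an approximation of the real definition in the kernel headers, not a verbatim copy.

struct kvm_mmu {
	/* per-mode callbacks installed by init_kvm_tdp_mmu()/init_kvm_softmmu()/
	 * kvm_init_shadow_mmu()/kvm_init_shadow_ept_mmu() */
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);

	/* current root and paging geometry */
	hpa_t root_hpa;			/* INVALID_PAGE when no root is loaded */
	int root_level;			/* guest paging depth */
	int shadow_root_level;		/* shadow/EPT/TDP paging depth */
	union kvm_mmu_page_role base_role;
	bool direct_map;		/* TDP: guest page tables not shadowed */
	bool nx;

	/* PAE and long-mode root scaffolding */
	u64 *pae_root;
	u64 *lm_root;

	/* reserved-bit and permission checking state */
	struct rsvd_bits_validate guest_rsvd_check;
	struct rsvd_bits_validate shadow_zero_check;
	u8 permissions[16];
	u8 last_pte_bitmap;
};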

1907 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1956 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
2094 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2100 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2101 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2156 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2157 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2160 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2161 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2166 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
2720 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2907 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
3041 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3044 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3045 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3046 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3047 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3057 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3063 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3073 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3077 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3097 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3104 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3105 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3107 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3119 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3121 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
3135 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3144 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3145 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3156 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3166 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3170 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3173 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3174 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3176 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3192 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3194 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3200 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3201 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3213 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3215 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3237 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3240 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3245 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3246 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3253 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
3298 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) in is_rsvd_bits_set() argument
3300 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); in is_rsvd_bits_set()
3303 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
3305 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
3325 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3416 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3430 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3431 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3493 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3574 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3594 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) in is_last_gpte() argument
3600 return mmu->last_pte_bitmap & (1 << index); in is_last_gpte()
3810 struct kvm_mmu *mmu, bool ept) in update_permission_bitmask() argument
3818 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
3837 x |= !mmu->nx; in update_permission_bitmask()
3870 mmu->permissions[byte] = map; in update_permission_bitmask()
3874 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_pte_bitmap() argument
3877 unsigned level, root_level = mmu->root_level; in update_last_pte_bitmap()
3886 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) in update_last_pte_bitmap()
3889 mmu->last_pte_bitmap = map; in update_last_pte_bitmap()
3948 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3994 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
4021 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
4045 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
4122 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4131 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4344 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4363 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4392 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4404 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4436 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4456 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4457 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4458 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4475 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4477 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4484 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4485 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4486 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
4494 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
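
Read end to end, the references trace the lifetime of the per-vCPU root: alloc_mmu_pages()/kvm_mmu_create() set up pae_root and leave root_hpa as INVALID_PAGE, kvm_mmu_load() builds a root through mmu_alloc_roots() (direct or shadow, depending on direct_map), resynchronizes unsync children via mmu_sync_roots(), and hands the result to hardware through the set_cr3 callback (line 4122), while kvm_mmu_unload()/mmu_free_roots() invalidate it again. The sketch below is a hypothetical condensation of that load path pieced together from the lines above, not the verbatim kernel function; the real path also tops up memory caches, takes mmu_lock, and handles errors more carefully.

/* Hypothetical condensation of the load path implied by the references above. */
static int kvm_mmu_load_sketch(struct kvm_vcpu *vcpu)
{
	int r;

	/* picks mmu_alloc_direct_roots() or mmu_alloc_shadow_roots()
	 * based on vcpu->arch.mmu.direct_map (line 3226) */
	r = mmu_alloc_roots(vcpu);
	if (r)
		return r;

	/* bring unsync shadow pages reachable from root_hpa / pae_root[]
	 * back in sync (lines 3237-3253); locking omitted here */
	mmu_sync_roots(vcpu);

	/* point the hardware at the new root (line 4122); set_cr3() is
	 * expected to flush the TLB */
	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	return 0;
}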