Lines matching refs:arch (all hits are in arch/x86/kvm/mmu.c)
301 return vcpu->arch.efer & EFER_NX; in is_nx()
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); in mmu_topup_memory_caches()
720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_free_memory_caches()
730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); in mmu_free_memory_caches()
731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_free_memory_caches()
746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_alloc_pte_list_desc()
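
A note on the cache references above (lines 713-731 and 746): the pattern they reflect is pre-filling small per-vcpu object caches before the MMU lock is taken, so allocations made while walking shadow page tables can neither fail nor sleep. The following is a minimal standalone sketch of that technique with made-up names; it is not the kernel's kvm_mmu_memory_cache API, whose full signatures are not shown in this listing.

#include <stdlib.h>

#define CACHE_MAX 8

struct obj_cache {                     /* stand-in for a per-vcpu memory cache */
	int nobjs;
	void *objects[CACHE_MAX];
};

/* "Top up": allocate until at least 'min' objects are banked.
 * This part may fail or sleep, so it runs before the lock is taken. */
static int cache_topup(struct obj_cache *mc, int min)
{
	if (min > CACHE_MAX)
		min = CACHE_MAX;       /* keep the sketch in bounds */
	while (mc->nobjs < min) {
		void *obj = malloc(64);        /* arbitrary object size */
		if (!obj)
			return -1;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Allocation under the lock: hand out a banked object, never fails. */
static void *cache_alloc(struct obj_cache *mc)
{
	return mc->objects[--mc->nobjs];
}

/* Teardown: free whatever is still banked (cf. mmu_free_memory_caches). */
static void cache_free(struct obj_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}
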
781 return &slot->arch.lpage_info[level - 2][idx]; in lpage_info_slot()
796 kvm->arch.indirect_shadow_pages++; in account_shadowed()
812 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
1016 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; in __gfn_to_rmap()
1034 cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_can_add()
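
Lines 781 and 1016 use the same indexing idiom for per-level metadata: one array per large-page level, offset so the base 4K level lands at index 0 (PT_PAGE_TABLE_LEVEL is 1, and lpage_info starts at the 2M level, hence the "- 2"), with the entry chosen by the gfn's position inside the slot. Below is a rough standalone model of the rmap lookup, assuming x86's 512-entry page tables (9 bits per level); the struct and helper names are made up for illustration.

#include <stdint.h>

struct memslot_model {                 /* stand-in for the memslot fields used */
	uint64_t base_gfn;             /* first gfn covered by the slot */
	unsigned long *rmap[3];        /* one reverse-map array per level: 4K/2M/1G */
};

/* Level 1 (4K) selects rmap[0]; the index is the gfn's offset within the
 * slot, scaled down by 9 bits for every level above the base one. */
static unsigned long *gfn_to_rmap_model(const struct memslot_model *slot,
					uint64_t gfn, int level)
{
	unsigned int shift = (level - 1) * 9;
	uint64_t idx = (gfn >> shift) - (slot->base_gfn >> shift);

	return &slot->rmap[level - 1][idx];
}
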
1610 kvm->arch.n_used_mmu_pages += nr; in kvm_mod_used_mmu_pages()
1657 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); in kvm_mmu_alloc_page()
1658 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1660 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1668 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1817 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1885 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
2007 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2023 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2029 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2075 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
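
Around kvm_mmu_get_page() (lines 1817, 2023-2075) the references outline the shadow-page cache: pages are kept on a gfn-keyed hash (mmu_page_hash), a new page is stamped with the current mmu_valid_gen, and it is linked onto the VM-wide active_mmu_pages list. The snippet below is only a simplified model of a gfn-keyed bucket lookup with hypothetical types; the real lookup also compares role bits and may re-sync an unsynced page.

#include <stdint.h>
#include <stddef.h>

#define NBUCKETS 1024                  /* arbitrary bucket count for the model */

struct sp_model {
	uint64_t gfn;                  /* guest frame this shadow page maps */
	unsigned long valid_gen;       /* generation it was created in */
	struct sp_model *hash_next;
};

static struct sp_model *page_hash[NBUCKETS];

static struct sp_model *find_shadow_page(uint64_t gfn)
{
	struct sp_model *sp;

	for (sp = page_hash[gfn % NBUCKETS]; sp; sp = sp->hash_next)
		if (sp->gfn == gfn)
			return sp;
	return NULL;                   /* caller would allocate and hash a new one */
}
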
2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2086 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2090 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
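
Lines 2085-2095 initialize the shadow-walk iterator: it normally starts at root_hpa at shadow_root_level, but when a paging guest with a root level below PT64 is shadowed without a direct map, the walk starts from one of the four PAE roots, selected by bits 31:30 of the address. A one-line model of that selection:

#include <stdint.h>

/* Each PAE root entry covers 1 GiB of the 32-bit space, so bits 31:30 pick
 * the entry -- the (addr >> 30) & 3 seen at line 2095. */
static int pae_root_index(uint32_t addr)
{
	return (addr >> 30) & 3;
}
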
2264 list_move(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_prepare_zap_page()
2309 if (list_empty(&kvm->arch.active_mmu_pages)) in prepare_zap_oldest_mmu_page()
2312 sp = list_entry(kvm->arch.active_mmu_pages.prev, in prepare_zap_oldest_mmu_page()
2329 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2331 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
2336 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2339 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
2463 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, in kvm_get_guest_memory_type()
2744 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2931 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
3067 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3070 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3071 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3072 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3073 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3083 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3089 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3099 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3103 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3123 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3130 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3131 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3133 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3145 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3147 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
3161 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3170 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3171 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3182 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3192 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3196 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3199 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3200 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3202 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3220 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3227 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3239 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3241 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3244 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3252 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3263 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3266 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3271 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3272 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3279 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
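
The root-management references (lines 3067-3280) all distinguish the same two layouts: with a 4-level shadow, root_hpa points at a single root page; with a PT32E shadow, four pae_root entries are filled and root_hpa points at the pae_root array itself (lines 3147, 3220). The sketch below is a standalone model of tearing both layouts down, echoing the shape of mmu_free_roots(); the constants and types are illustrative, not the kernel's.

#include <stdint.h>

#define INVALID_PAGE   (~0ULL)
#define PT64_ROOT_LVL  4
#define PT32E_ROOT_LVL 3

struct mmu_model {
	uint64_t root_hpa;
	uint64_t pae_root[4];
	int      shadow_root_level;
};

static void free_roots_model(struct mmu_model *mmu)
{
	if (mmu->root_hpa == INVALID_PAGE)
		return;

	if (mmu->shadow_root_level == PT64_ROOT_LVL) {
		/* single 4-level root: dropping it invalidates everything */
		mmu->root_hpa = INVALID_PAGE;
		return;
	}

	/* PAE case: four roots, each covering 1 GiB of guest space */
	for (int i = 0; i < 4; i++)
		mmu->pae_root[i] = INVALID_PAGE;
	mmu->root_hpa = INVALID_PAGE;
}
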
3312 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); in nonpaging_gva_to_gpa_nested()
3328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3401 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3411 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
3413 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in kvm_arch_setup_async_pf()
3414 arch.gfn = gfn; in kvm_arch_setup_async_pf()
3415 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3416 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); in kvm_arch_setup_async_pf()
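
Line 3413 shows how the async page-fault token is packed: a per-vcpu sequence number in the bits above 12 and the vcpu id in the low 12 bits, so a completion can be matched back to both the vcpu and the specific request. A trivial standalone illustration (assuming the vcpu id fits in 12 bits):

/* Token layout per line 3413: [ sequence | 12-bit vcpu id ]. */
static unsigned int make_apf_token(unsigned int *next_seq, unsigned int vcpu_id)
{
	return ((*next_seq)++ << 12) | (vcpu_id & 0xfff);
}
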
3467 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3545 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3833 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3877 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
3902 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
3925 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
3936 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
4002 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4011 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4185 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4223 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4242 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4271 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4283 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4315 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4335 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4336 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4337 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4354 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4356 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4363 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4364 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4365 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
4366 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; in kvm_mmu_create()
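
Lines 4363-4366 (together with 3312 and 3936) show the split between the two per-vcpu MMU contexts: vcpu->arch.mmu and vcpu->arch.nested_mmu, with walk_mmu being a pointer that selects which context handles guest page-table walks, and each context carrying its own translate_gpa hook. A minimal model of that indirection, with hypothetical types and placeholder hooks:

#include <stdint.h>

struct mmu_ctx {
	uint64_t (*translate_gpa)(uint64_t gpa);  /* per-context translation hook */
	uint64_t root_hpa;
};

struct vcpu_model {
	struct mmu_ctx mmu;          /* the ordinary (L1) context            */
	struct mmu_ctx nested_mmu;   /* used when an L2 guest is running     */
	struct mmu_ctx *walk_mmu;    /* which context guest walks go through */
};

static uint64_t gpa_identity(uint64_t gpa)   { return gpa; }
static uint64_t gpa_via_nested(uint64_t gpa) { return gpa; /* placeholder */ }

/* Default wiring in the spirit of lines 4363-4366: walks go through the
 * ordinary context; the nested context gets its own translation hook. */
static void mmu_create_model(struct vcpu_model *v)
{
	v->walk_mmu = &v->mmu;
	v->mmu.root_hpa = ~0ULL;                   /* INVALID_PAGE */
	v->mmu.translate_gpa = gpa_identity;
	v->nested_mmu.translate_gpa = gpa_via_nested;
}
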
4373 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
4394 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_remove_write_access()
4475 rmapp = memslot->arch.rmap[0]; in kvm_mmu_zap_collapsible_sptes()
4510 rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1]; in kvm_mmu_slot_leaf_clear_dirty()
4553 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_largepage_remove_write_access()
4591 rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL]; in kvm_mmu_slot_set_dirty()
4621 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
4650 &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4661 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4677 kvm->arch.mmu_valid_gen++; in kvm_mmu_invalidate_zap_all_pages()
4696 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); in kvm_has_zapped_obsolete_pages()
4738 if (!kvm->arch.n_used_mmu_pages && in mmu_shrink_scan()
4747 &kvm->arch.zapped_obsolete_pages); in mmu_shrink_scan()
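
Finally, lines 2007, 4677 and 4621-4661 outline the generation-based invalidation scheme: every shadow page records the mmu_valid_gen it was created under (line 2075), kvm_mmu_invalidate_zap_all_pages() simply bumps kvm->arch.mmu_valid_gen, and pages whose recorded generation no longer matches are treated as obsolete and collected onto zapped_obsolete_pages, which the shrinker also drains (line 4747). A tiny model of the generation check:

#include <stdbool.h>

struct sp_gen_model { unsigned long mmu_valid_gen; };

/* Bumping the global generation obsoletes every existing page at once,
 * without walking or touching any of them (cf. lines 2007 and 4677). */
static bool is_obsolete_model(const struct sp_gen_model *sp,
			      unsigned long current_gen)
{
	return sp->mmu_valid_gen != current_gen;
}
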
4846 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in kvm_mmu_get_spte_hierarchy()