Lines Matching refs:arch (KVM x86 MMU code)

301 	return vcpu->arch.efer & EFER_NX;  in is_nx()
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); in mmu_topup_memory_caches()
720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_free_memory_caches()
730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); in mmu_free_memory_caches()
731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_free_memory_caches()
746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_alloc_pte_list_desc()
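
The cluster at source lines 713-746 is the per-vCPU memory-cache pattern: mmu_topup_memory_caches() pre-fills vcpu->arch.mmu_pte_list_desc_cache, mmu_page_cache and mmu_page_header_cache before the paths that consume them run, mmu_free_memory_caches() drains them, and helpers such as mmu_alloc_pte_list_desc() draw from them. A minimal user-space model of that pre-fill/consume idiom follows; the names and sizes are illustrative, not the kernel's real helper signatures.

#include <stdlib.h>

#define CACHE_CAPACITY 40	/* arbitrary; the kernel caches are likewise small and fixed */

/* Illustrative stand-in for struct kvm_mmu_memory_cache. */
struct mem_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Pre-fill the cache so a later consumer can take objects without
 * allocating; this is the role mmu_topup_memory_cache() plays above. */
static int cache_topup(struct mem_cache *mc, size_t obj_size, int min)
{
	if (min > CACHE_CAPACITY)
		min = CACHE_CAPACITY;
	while (mc->nobjs < min) {
		void *obj = calloc(1, obj_size);
		if (!obj)
			return -1;		/* out of memory */
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Hand out a pre-allocated object; the role of mmu_memory_cache_alloc(). */
static void *cache_alloc(struct mem_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}

/* Drain everything; the role of mmu_free_memory_cache(). */
static void cache_free(struct mem_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}

Filling the caches up front is what lets a later check such as rmap_can_add() (line 1061) reduce to an object-count test on vcpu->arch.mmu_pte_list_desc_cache.
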
781 return &slot->arch.lpage_info[level - 2][idx]; in lpage_info_slot()
799 kvm->arch.indirect_shadow_pages++; in account_shadowed()
818 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
1041 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; in __gfn_to_rmap()
1061 cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_can_add()
1681 kvm->arch.n_used_mmu_pages += nr; in kvm_mod_used_mmu_pages()
1728 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); in kvm_mmu_alloc_page()
1729 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1731 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1739 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
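
Lines 1681-1739 show where two of those caches are consumed: kvm_mmu_alloc_page() takes the struct kvm_mmu_page header from mmu_page_header_cache, the table page (and, for indirect pages, a gfns array) from mmu_page_cache, and chains the result onto kvm->arch.active_mmu_pages, while kvm->arch.n_used_mmu_pages is kept in step by kvm_mod_used_mmu_pages() (line 1681). A hedged sketch of that shape, with plain calloc() standing in for the caches and a hand-rolled list for list_add():

#include <stdlib.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096

/* Illustrative stand-in for struct kvm_mmu_page. */
struct shadow_page {
	struct shadow_page *prev, *next;	/* link on the active list */
	uint64_t *spt;				/* the shadow page table itself */
	uint64_t *gfns;				/* per-entry gfns, indirect pages only */
};

struct mmu_state {
	struct shadow_page *active_head;	/* models kvm->arch.active_mmu_pages, head = newest */
	unsigned int n_used_mmu_pages;		/* models kvm->arch.n_used_mmu_pages */
};

/* Models kvm_mmu_alloc_page(): header + table memory, list add, accounting. */
static struct shadow_page *alloc_shadow_page(struct mmu_state *mmu, int direct)
{
	struct shadow_page *sp = calloc(1, sizeof(*sp));	/* mmu_page_header_cache */

	if (!sp)
		return NULL;
	sp->spt = calloc(1, TOY_PAGE_SIZE);			/* mmu_page_cache */
	sp->gfns = direct ? NULL : calloc(1, TOY_PAGE_SIZE);	/* only indirect pages track gfns */

	/* list_add(): new pages go on the head, so the tail stays the oldest. */
	sp->next = mmu->active_head;
	if (mmu->active_head)
		mmu->active_head->prev = sp;
	mmu->active_head = sp;

	mmu->n_used_mmu_pages++;	/* kvm_mod_used_mmu_pages(kvm, +1) in the kernel */
	return sp;
}
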
1888 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
1907 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1956 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
2078 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2094 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2100 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2101 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2137 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2146 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2156 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2157 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2160 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2161 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2166 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
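
Lines 2156-2166 set up the shadow-walk iterator: it normally starts at mmu.root_hpa with level mmu.shadow_root_level, but when a shallower guest root is shadowed without a direct map (root_level < PT64_ROOT_LEVEL and !direct_map) the walk starts one level lower, at the pae_root[] entry selected by bits 31:30 of the address. A simplified sketch of that decision, with the root-level constants written out by hand and the kernel's masking of the low flag bits of pae_root[] left out:

#include <stdint.h>
#include <stdbool.h>

#define PT32E_ROOT_LEVEL 3	/* assumed values, matching x86 paging depths */
#define PT64_ROOT_LEVEL  4

struct walk_mmu {
	uint64_t root_hpa;		/* models vcpu->arch.mmu.root_hpa */
	int shadow_root_level;
	int root_level;
	bool direct_map;
	uint64_t pae_root[4];		/* models vcpu->arch.mmu.pae_root */
};

struct shadow_walk {
	uint64_t addr;			/* address being walked */
	uint64_t shadow_addr;		/* current shadow table (physical) */
	int level;
};

/* Models shadow_walk_init(): pick the table the walk starts from. */
static void walk_init(struct shadow_walk *it, struct walk_mmu *mmu, uint64_t addr)
{
	it->addr = addr;
	it->shadow_addr = mmu->root_hpa;
	it->level = mmu->shadow_root_level;

	/*
	 * A shallower guest root shadowed by a 4-level table: the effective
	 * root is the pae_root[] entry for this 1 GiB slice of the address
	 * space, one level below the nominal shadow root.
	 */
	if (it->level == PT64_ROOT_LEVEL &&
	    mmu->root_level < PT64_ROOT_LEVEL && !mmu->direct_map) {
		--it->level;
		it->shadow_addr = mmu->pae_root[(addr >> 30) & 3];
	}
}
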
2335 list_move(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_prepare_zap_page()
2380 if (list_empty(&kvm->arch.active_mmu_pages)) in prepare_zap_oldest_mmu_page()
2383 sp = list_entry(kvm->arch.active_mmu_pages.prev, in prepare_zap_oldest_mmu_page()
2400 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2402 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
2407 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2410 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
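
Lines 2380-2410 enforce the page-count cap: because new pages go on the head of active_mmu_pages (line 1739), the tail (.prev of the list head) is the oldest page; prepare_zap_oldest_mmu_page() victimizes it, and kvm_mmu_change_mmu_pages() repeats that until n_used_mmu_pages falls to the goal, then records the result in n_max_mmu_pages. A standalone model of the trim loop, with zapping reduced to unlinking:

#include <stdbool.h>

struct toy_sp {
	struct toy_sp *prev, *next;
};

struct toy_kvm {
	struct toy_sp *active_head;	/* newest */
	struct toy_sp *active_tail;	/* oldest */
	unsigned int n_used_mmu_pages;	/* models kvm->arch.n_used_mmu_pages */
	unsigned int n_max_mmu_pages;	/* models kvm->arch.n_max_mmu_pages */
};

/* Models prepare_zap_oldest_mmu_page(): take the victim from the list tail. */
static bool zap_oldest(struct toy_kvm *kvm)
{
	struct toy_sp *sp = kvm->active_tail;

	if (!sp)			/* list_empty(&kvm->arch.active_mmu_pages) */
		return false;
	kvm->active_tail = sp->prev;
	if (kvm->active_tail)
		kvm->active_tail->next = NULL;
	else
		kvm->active_head = NULL;
	kvm->n_used_mmu_pages--;	/* the kernel does this in the real zap path */
	return true;
}

/* Models kvm_mmu_change_mmu_pages(): shrink toward the goal, then record it. */
static void change_mmu_pages(struct toy_kvm *kvm, unsigned int goal)
{
	if (kvm->n_used_mmu_pages > goal) {
		while (kvm->n_used_mmu_pages > goal)
			if (!zap_oldest(kvm))
				break;
		goal = kvm->n_used_mmu_pages;
	}
	kvm->n_max_mmu_pages = goal;
}
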
2720 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2907 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
3041 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3044 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3045 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3046 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3047 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3057 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3063 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3073 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3077 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3097 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3104 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3105 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3107 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3119 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3121 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
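
Lines 3097-3121 build the roots for a direct-map MMU: with a 4-level shadow a single top-level shadow page is allocated and root_hpa is the physical address of its table; with a PAE (3-level) shadow each of the four pae_root slots gets its own table, marked present, and root_hpa points at the pae_root array itself. A rough sketch of that branch, with a hypothetical alloc_table() helper standing in for kvm_mmu_get_page() plus __pa(sp->spt):

#include <stdint.h>

#define PT32E_ROOT_LEVEL 3
#define PT64_ROOT_LEVEL  4
#define PT_PRESENT_MASK  (1ULL << 0)

struct direct_mmu {
	int shadow_root_level;
	uint64_t root_hpa;
	uint64_t pae_root[4];
};

/* Hypothetical helper: builds a shadow table for one slice of the guest
 * address space and returns its physical address. */
extern uint64_t alloc_table(int level, unsigned int slot);

/* Models mmu_alloc_direct_roots(). */
static int alloc_direct_roots(struct direct_mmu *mmu)
{
	if (mmu->shadow_root_level == PT64_ROOT_LEVEL) {
		/* One 4-level root; root_hpa is that table. */
		mmu->root_hpa = alloc_table(PT64_ROOT_LEVEL, 0);
	} else if (mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
		unsigned int i;

		/* Four roots, one per GiB; root_hpa is the pae_root array. */
		for (i = 0; i < 4; i++)
			mmu->pae_root[i] = alloc_table(PT32E_ROOT_LEVEL - 1, i)
					   | PT_PRESENT_MASK;
		mmu->root_hpa = (uint64_t)(uintptr_t)mmu->pae_root;	/* stands in for __pa(pae_root) */
	} else {
		return -1;
	}
	return 0;
}
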
3135 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3144 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3145 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3156 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3166 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3170 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3173 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3174 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3176 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3192 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3194 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3200 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3201 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3213 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3215 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3237 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3240 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3245 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3246 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3253 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
3286 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); in nonpaging_gva_to_gpa_nested()
3325 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3416 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3426 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
3428 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in kvm_arch_setup_async_pf()
3429 arch.gfn = gfn; in kvm_arch_setup_async_pf()
3430 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3431 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3433 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
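
Lines 3426-3433 assemble the arch-specific record that travels with an async page fault: a token made from a per-vCPU sequence number shifted left by 12 bits OR'd with the vcpu_id, plus the faulting gfn and a snapshot of direct_map and the current CR3. The same construction in self-contained form, using toy types rather than the kernel's:

#include <stdint.h>
#include <stdbool.h>

/* Models struct kvm_arch_async_pf: what x86 stashes with a queued fault. */
struct async_pf_arch {
	uint32_t token;		/* identifies the fault when completion is injected */
	uint64_t gfn;		/* guest frame that was not resident */
	bool direct_map;	/* was the MMU in direct/TDP mode when queued? */
	uint64_t cr3;		/* guest root at queue time */
};

struct toy_vcpu {
	uint32_t vcpu_id;
	uint32_t apf_next_id;	/* models vcpu->arch.apf.id */
	bool direct_map;
	uint64_t cr3;
};

/* Models the setup in kvm_arch_setup_async_pf(). */
static struct async_pf_arch setup_async_pf(struct toy_vcpu *vcpu, uint64_t gfn)
{
	struct async_pf_arch arch;

	arch.token = (vcpu->apf_next_id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->direct_map;
	arch.cr3 = vcpu->cr3;
	return arch;
}
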
3493 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3574 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3948 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3994 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
4021 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
4045 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
4056 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
4122 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4131 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4306 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4344 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4363 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4392 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4404 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4436 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4456 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4457 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4458 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4475 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4477 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4484 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4485 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4486 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
4487 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; in kvm_mmu_create()
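
Lines 4456-4487 cover setup and teardown of the fixed per-vCPU pieces: alloc_mmu_pages() allocates one page for pae_root and poisons its entries with INVALID_PAGE, kvm_mmu_create() points walk_mmu at the regular MMU, marks root_hpa invalid and wires the translate_gpa hooks, and free_mmu_pages() releases pae_root and, if present, lm_root. A condensed model of that lifecycle, with the translate_gpa hooks omitted:

#include <stdlib.h>
#include <stdint.h>

#define INVALID_PAGE ((uint64_t)-1)	/* assumed sentinel, mirroring the kernel's */

struct toy_mmu {
	uint64_t root_hpa;
	uint64_t *pae_root;		/* one page; the four PAE root entries live at its start */
	uint64_t *lm_root;		/* only used when a long-mode shadow sits above pae_root */
};

struct vcpu_state {
	struct toy_mmu mmu;		/* models vcpu->arch.mmu */
	struct toy_mmu *walk_mmu;	/* models vcpu->arch.walk_mmu */
};

/* Models alloc_mmu_pages() + kvm_mmu_create(). */
static int toy_mmu_create(struct vcpu_state *vcpu)
{
	int i;

	vcpu->mmu.pae_root = calloc(512, sizeof(uint64_t));	/* one 4 KiB page */
	if (!vcpu->mmu.pae_root)
		return -1;
	for (i = 0; i < 4; i++)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	vcpu->walk_mmu = &vcpu->mmu;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	return 0;
}

/* Models free_mmu_pages(); free(NULL) is a no-op, like the lm_root != NULL check. */
static void toy_mmu_destroy(struct vcpu_state *vcpu)
{
	free(vcpu->mmu.pae_root);
	free(vcpu->mmu.lm_root);
}
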
4494 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
4739 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
4768 &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4779 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4795 kvm->arch.mmu_valid_gen++; in kvm_mmu_invalidate_zap_all_pages()
4814 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); in kvm_has_zapped_obsolete_pages()
4856 if (!kvm->arch.n_used_mmu_pages && in mmu_shrink_scan()
4865 &kvm->arch.zapped_obsolete_pages); in mmu_shrink_scan()
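
The last group (lines 2078, 2146 and 4739-4865) is the generation scheme behind "zap everything": each shadow page is stamped with kvm->arch.mmu_valid_gen when it is created, kvm_mmu_invalidate_zap_all_pages() bumps that generation (line 4795), and any page whose stamp no longer matches is obsolete and gets reaped in batches. A minimal sketch of the check and the invalidation:

#include <stdbool.h>

struct gen_kvm {
	unsigned long mmu_valid_gen;	/* models kvm->arch.mmu_valid_gen */
};

struct gen_sp {
	unsigned long mmu_valid_gen;	/* stamped at creation, see kvm_mmu_get_page() */
};

/* Models is_obsolete_sp(): was the page created before the last invalidation? */
static bool is_obsolete(struct gen_kvm *kvm, struct gen_sp *sp)
{
	return sp->mmu_valid_gen != kvm->mmu_valid_gen;
}

/* Models the core of kvm_mmu_invalidate_zap_all_pages(): instead of freeing
 * every shadow page on the spot, bump the generation so existing pages become
 * obsolete and can be zapped in batches afterwards. */
static void invalidate_all(struct gen_kvm *kvm)
{
	kvm->mmu_valid_gen++;
}

Lines 4768, 4779, 4814 and 4865 show the batching list this deferred reaping goes through, kvm->arch.zapped_obsolete_pages, used both by kvm_zap_obsolete_pages() and by the shrinker.
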