Lines matching refs: vcpu
179 static void mmu_free_roots(struct kvm_vcpu *vcpu);
226 static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu) in kvm_current_mmio_generation() argument
228 return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK; in kvm_current_mmio_generation()
231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
234 unsigned int gen = kvm_current_mmio_generation(vcpu); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
272 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument
276 kvm_gen = kvm_current_mmio_generation(vcpu); in check_mmio_spte()
299 static int is_nx(struct kvm_vcpu *vcpu) in is_nx() argument
301 return vcpu->arch.efer & EFER_NX; in is_nx()
632 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_begin() argument
639 vcpu->mode = READING_SHADOW_PAGE_TABLES; in walk_shadow_page_lockless_begin()
647 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_end() argument
655 vcpu->mode = OUTSIDE_GUEST_MODE; in walk_shadow_page_lockless_end()
709 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) in mmu_topup_memory_caches() argument
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); in mmu_topup_memory_caches()
720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
726 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) in mmu_free_memory_caches() argument
728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_free_memory_caches()
730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); in mmu_free_memory_caches()
731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_free_memory_caches()
744 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) in mmu_alloc_pte_list_desc() argument
746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_alloc_pte_list_desc()
834 static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in has_wrprotected_page() argument
838 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in has_wrprotected_page()
871 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
876 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in gfn_to_memslot_dirty_bitmap()
883 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn, in mapping_level() argument
892 slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn); in mapping_level()
897 host_level = host_mapping_level(vcpu->kvm, large_gfn); in mapping_level()
923 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
934 desc = mmu_alloc_pte_list_desc(vcpu); in pte_list_add()
947 desc->more = mmu_alloc_pte_list_desc(vcpu); in pte_list_add()
1057 static bool rmap_can_add(struct kvm_vcpu *vcpu) in rmap_can_add() argument
1061 cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_can_add()
1065 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1072 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_add()
1073 return pte_list_add(vcpu, spte, rmapp); in rmap_add()
1174 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) in drop_large_spte() argument
1176 if (__drop_large_spte(vcpu->kvm, sptep)) in drop_large_spte()
1177 kvm_flush_remote_tlbs(vcpu->kvm); in drop_large_spte()
1342 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) in rmap_write_protect() argument
1349 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in rmap_write_protect()
1353 write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true); in rmap_write_protect()
1614 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1621 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_recycle()
1623 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1624 kvm_flush_remote_tlbs(vcpu->kvm); in rmap_recycle()
1701 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, in mmu_page_add_parent_pte() argument
1707 pte_list_add(vcpu, parent_pte, &sp->parent_ptes); in mmu_page_add_parent_pte()
1723 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, in kvm_mmu_alloc_page() argument
1728 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); in kvm_mmu_alloc_page()
1729 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1731 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1739 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1741 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_alloc_page()
1742 kvm_mod_used_mmu_pages(vcpu->kvm, +1); in kvm_mmu_alloc_page()
1766 static int nonpaging_sync_page(struct kvm_vcpu *vcpu, in nonpaging_sync_page() argument
1772 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in nonpaging_invlpg() argument
1776 static void nonpaging_update_pte(struct kvm_vcpu *vcpu, in nonpaging_update_pte() argument
1896 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __kvm_sync_page() argument
1899 if (sp->role.cr4_pae != !!is_pae(vcpu)) { in __kvm_sync_page()
1900 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1905 kvm_unlink_unsync_page(vcpu->kvm, sp); in __kvm_sync_page()
1907 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1908 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1912 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in __kvm_sync_page()
1916 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, in kvm_sync_page_transient() argument
1922 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); in kvm_sync_page_transient()
1924 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_page_transient()
1932 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } in kvm_mmu_audit() argument
1936 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page() argument
1939 return __kvm_sync_page(vcpu, sp, invalid_list, true); in kvm_sync_page()
1943 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages() argument
1949 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
1954 kvm_unlink_unsync_page(vcpu->kvm, s); in kvm_sync_pages()
1955 if ((s->role.cr4_pae != !!is_pae(vcpu)) || in kvm_sync_pages()
1956 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
1957 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); in kvm_sync_pages()
1963 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_pages()
1965 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_sync_pages()
2027 static void mmu_sync_children(struct kvm_vcpu *vcpu, in mmu_sync_children() argument
2041 protected |= rmap_write_protect(vcpu, sp->gfn); in mmu_sync_children()
2044 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_sync_children()
2047 kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2050 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_sync_children()
2051 cond_resched_lock(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2081 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, in kvm_mmu_get_page() argument
2094 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2100 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2101 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2106 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2107 if (is_obsolete_sp(vcpu->kvm, sp)) in kvm_mmu_get_page()
2116 if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) in kvm_mmu_get_page()
2119 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_get_page()
2121 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); in kvm_mmu_get_page()
2130 ++vcpu->kvm->stat.mmu_cache_miss; in kvm_mmu_get_page()
2131 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); in kvm_mmu_get_page()
2137 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2139 if (rmap_write_protect(vcpu, gfn)) in kvm_mmu_get_page()
2140 kvm_flush_remote_tlbs(vcpu->kvm); in kvm_mmu_get_page()
2142 kvm_sync_pages(vcpu, gfn); in kvm_mmu_get_page()
2144 account_shadowed(vcpu->kvm, sp); in kvm_mmu_get_page()
2146 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2153 struct kvm_vcpu *vcpu, u64 addr) in shadow_walk_init() argument
2156 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2157 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2160 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2161 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2166 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
2217 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2235 kvm_flush_remote_tlbs(vcpu->kvm); in validate_direct_spte()
2437 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_unsync_page() argument
2440 ++vcpu->kvm->stat.mmu_unsync; in __kvm_unsync_page()
2446 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages() argument
2450 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2454 __kvm_unsync_page(vcpu, s); in kvm_unsync_pages()
2458 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect() argument
2464 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2475 kvm_unsync_pages(vcpu, gfn); in mmu_need_write_protect()
2487 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in set_spte() argument
2495 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) in set_spte()
2513 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2532 has_wrprotected_page(vcpu, gfn, level)) in set_spte()
2546 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { in set_spte()
2556 kvm_vcpu_mark_page_dirty(vcpu, gfn); in set_spte()
2562 kvm_flush_remote_tlbs(vcpu->kvm); in set_spte()
2567 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in mmu_set_spte() argument
2590 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2594 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2595 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2600 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2604 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in mmu_set_spte()
2616 ++vcpu->kvm->stat.lpages; in mmu_set_spte()
2620 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2622 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2629 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn() argument
2634 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); in pte_prefetch_gfn_to_pfn()
2641 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, in direct_pte_prefetch_many() argument
2652 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); in direct_pte_prefetch_many()
2661 mmu_set_spte(vcpu, start, access, 0, NULL, in direct_pte_prefetch_many()
2668 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, in __direct_pte_prefetch() argument
2683 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2691 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
2708 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
2711 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, in __direct_map() argument
2720 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2723 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { in __direct_map()
2725 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, in __direct_map()
2728 direct_pte_prefetch(vcpu, iterator.sptep); in __direct_map()
2729 ++vcpu->stat.pf_fixed; in __direct_map()
2733 drop_large_spte(vcpu, iterator.sptep); in __direct_map()
2739 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, in __direct_map()
2762 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2774 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); in kvm_handle_bad_page()
2781 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, in transparent_hugepage_adjust() argument
2797 !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) { in transparent_hugepage_adjust()
2822 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2829 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2834 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2863 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in fast_pf_fix_direct_spte() argument
2889 kvm_vcpu_mark_page_dirty(vcpu, gfn); in fast_pf_fix_direct_spte()
2899 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, in fast_page_fault() argument
2907 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
2913 walk_shadow_page_lockless_begin(vcpu); in fast_page_fault()
2914 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2967 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
2969 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
2971 walk_shadow_page_lockless_end(vcpu); in fast_page_fault()
2976 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2978 static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
2980 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, in nonpaging_map() argument
2990 level = mapping_level(vcpu, gfn, &force_pt_level); in nonpaging_map()
3003 if (fast_page_fault(vcpu, v, level, error_code)) in nonpaging_map()
3006 mmu_seq = vcpu->kvm->mmu_notifier_seq; in nonpaging_map()
3009 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3012 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3015 spin_lock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3016 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in nonpaging_map()
3018 make_mmu_pages_available(vcpu); in nonpaging_map()
3020 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3021 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3023 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3029 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3035 static void mmu_free_roots(struct kvm_vcpu *vcpu) in mmu_free_roots() argument
3041 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3044 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3045 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3046 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3047 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3049 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3053 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in mmu_free_roots()
3054 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3056 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3057 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3061 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3063 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3070 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in mmu_free_roots()
3073 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3075 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3076 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3077 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3080 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) in mmu_check_root() argument
3084 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { in mmu_check_root()
3085 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in mmu_check_root()
3092 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) in mmu_alloc_direct_roots() argument
3097 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3098 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3099 make_mmu_pages_available(vcpu); in mmu_alloc_direct_roots()
3100 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, in mmu_alloc_direct_roots()
3103 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3104 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3105 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3107 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3110 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3111 make_mmu_pages_available(vcpu); in mmu_alloc_direct_roots()
3112 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), in mmu_alloc_direct_roots()
3118 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3119 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3121 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
3128 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) in mmu_alloc_shadow_roots() argument
3135 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3137 if (mmu_check_root(vcpu, root_gfn)) in mmu_alloc_shadow_roots()
3144 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3145 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3149 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3150 make_mmu_pages_available(vcpu); in mmu_alloc_shadow_roots()
3151 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, in mmu_alloc_shadow_roots()
3155 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3156 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3166 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3170 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3173 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3174 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3176 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3180 if (mmu_check_root(vcpu, root_gfn)) in mmu_alloc_shadow_roots()
3183 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3184 make_mmu_pages_available(vcpu); in mmu_alloc_shadow_roots()
3185 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, in mmu_alloc_shadow_roots()
3190 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3192 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3194 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3200 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3201 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3213 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3215 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3224 static int mmu_alloc_roots(struct kvm_vcpu *vcpu) in mmu_alloc_roots() argument
3226 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3227 return mmu_alloc_direct_roots(vcpu); in mmu_alloc_roots()
3229 return mmu_alloc_shadow_roots(vcpu); in mmu_alloc_roots()
3232 static void mmu_sync_roots(struct kvm_vcpu *vcpu) in mmu_sync_roots() argument
3237 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3240 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3243 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); in mmu_sync_roots()
3244 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); in mmu_sync_roots()
3245 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3246 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3248 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
3249 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); in mmu_sync_roots()
3253 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
3258 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
3261 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); in mmu_sync_roots()
3264 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) in kvm_mmu_sync_roots() argument
3266 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3267 mmu_sync_roots(vcpu); in kvm_mmu_sync_roots()
3268 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3272 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, in nonpaging_gva_to_gpa() argument
3280 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, in nonpaging_gva_to_gpa_nested() argument
3286 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); in nonpaging_gva_to_gpa_nested()
3308 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) in quickly_check_mmio_pf() argument
3311 return vcpu_match_mmio_gpa(vcpu, addr); in quickly_check_mmio_pf()
3313 return vcpu_match_mmio_gva(vcpu, addr); in quickly_check_mmio_pf()
3318 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in walk_shadow_page_get_mmio_spte() argument
3325 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3328 walk_shadow_page_lockless_begin(vcpu); in walk_shadow_page_get_mmio_spte()
3330 for (shadow_walk_init(&iterator, vcpu, addr), in walk_shadow_page_get_mmio_spte()
3342 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3346 walk_shadow_page_lockless_end(vcpu); in walk_shadow_page_get_mmio_spte()
3362 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) in handle_mmio_page_fault() argument
3367 if (quickly_check_mmio_pf(vcpu, addr, direct)) in handle_mmio_page_fault()
3370 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
3378 if (!check_mmio_spte(vcpu, spte)) in handle_mmio_page_fault()
3385 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
3397 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, in nonpaging_page_fault() argument
3406 r = handle_mmio_page_fault(vcpu, gva, true); in nonpaging_page_fault()
3412 r = mmu_topup_memory_caches(vcpu); in nonpaging_page_fault()
3416 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3420 return nonpaging_map(vcpu, gva & PAGE_MASK, in nonpaging_page_fault()
3424 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3428 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in kvm_arch_setup_async_pf()
3430 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3431 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3433 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
3436 static bool can_do_async_pf(struct kvm_vcpu *vcpu) in can_do_async_pf() argument
3438 if (unlikely(!lapic_in_kernel(vcpu) || in can_do_async_pf()
3439 kvm_event_needs_reinjection(vcpu))) in can_do_async_pf()
3442 return kvm_x86_ops->interrupt_allowed(vcpu); in can_do_async_pf()
3445 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf() argument
3451 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in try_async_pf()
3457 if (!prefault && can_do_async_pf(vcpu)) { in try_async_pf()
3459 if (kvm_find_async_pf_gfn(vcpu, gfn)) { in try_async_pf()
3461 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in try_async_pf()
3463 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
3472 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in check_hugepage_cache_consistency() argument
3478 return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); in check_hugepage_cache_consistency()
3481 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, in tdp_page_fault() argument
3493 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3496 r = handle_mmio_page_fault(vcpu, gpa, true); in tdp_page_fault()
3502 r = mmu_topup_memory_caches(vcpu); in tdp_page_fault()
3506 force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn, in tdp_page_fault()
3508 level = mapping_level(vcpu, gfn, &force_pt_level); in tdp_page_fault()
3511 !check_hugepage_cache_consistency(vcpu, gfn, level)) in tdp_page_fault()
3516 if (fast_page_fault(vcpu, gpa, level, error_code)) in tdp_page_fault()
3519 mmu_seq = vcpu->kvm->mmu_notifier_seq; in tdp_page_fault()
3522 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3525 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3528 spin_lock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3529 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in tdp_page_fault()
3531 make_mmu_pages_available(vcpu); in tdp_page_fault()
3533 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3534 r = __direct_map(vcpu, gpa, write, map_writable, in tdp_page_fault()
3536 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3541 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3546 static void nonpaging_init_context(struct kvm_vcpu *vcpu, in nonpaging_init_context() argument
3561 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu) in kvm_mmu_new_cr3() argument
3563 mmu_free_roots(vcpu); in kvm_mmu_new_cr3()
3566 static unsigned long get_cr3(struct kvm_vcpu *vcpu) in get_cr3() argument
3568 return kvm_read_cr3(vcpu); in get_cr3()
3571 static void inject_page_fault(struct kvm_vcpu *vcpu, in inject_page_fault() argument
3574 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3577 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3587 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
3617 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, in __reset_rsvds_bits_mask() argument
3699 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, in reset_rsvds_bits_mask() argument
3702 __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, in reset_rsvds_bits_mask()
3703 cpuid_maxphyaddr(vcpu), context->root_level, in reset_rsvds_bits_mask()
3704 context->nx, guest_cpuid_has_gbpages(vcpu), in reset_rsvds_bits_mask()
3705 is_pse(vcpu), guest_cpuid_is_amd(vcpu)); in reset_rsvds_bits_mask()
3742 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, in reset_rsvds_bits_mask_ept() argument
3746 cpuid_maxphyaddr(vcpu), execonly); in reset_rsvds_bits_mask_ept()
3755 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) in reset_shadow_zero_bits_mask() argument
3763 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, in reset_shadow_zero_bits_mask()
3766 guest_cpuid_has_gbpages(vcpu), is_pse(vcpu), in reset_shadow_zero_bits_mask()
3782 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, in reset_tdp_shadow_zero_bits_mask() argument
3786 __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check, in reset_tdp_shadow_zero_bits_mask()
3802 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, in reset_ept_shadow_zero_bits_mask() argument
3809 static void update_permission_bitmask(struct kvm_vcpu *vcpu, in update_permission_bitmask() argument
3816 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); in update_permission_bitmask()
3817 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); in update_permission_bitmask()
3839 w |= !is_write_protection(vcpu) && !uf; in update_permission_bitmask()
3874 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_pte_bitmap() argument
3886 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) in update_last_pte_bitmap()
3892 static void paging64_init_context_common(struct kvm_vcpu *vcpu, in paging64_init_context_common() argument
3896 context->nx = is_nx(vcpu); in paging64_init_context_common()
3899 reset_rsvds_bits_mask(vcpu, context); in paging64_init_context_common()
3900 update_permission_bitmask(vcpu, context, false); in paging64_init_context_common()
3901 update_last_pte_bitmap(vcpu, context); in paging64_init_context_common()
3903 MMU_WARN_ON(!is_pae(vcpu)); in paging64_init_context_common()
3914 static void paging64_init_context(struct kvm_vcpu *vcpu, in paging64_init_context() argument
3917 paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); in paging64_init_context()
3920 static void paging32_init_context(struct kvm_vcpu *vcpu, in paging32_init_context() argument
3926 reset_rsvds_bits_mask(vcpu, context); in paging32_init_context()
3927 update_permission_bitmask(vcpu, context, false); in paging32_init_context()
3928 update_last_pte_bitmap(vcpu, context); in paging32_init_context()
3940 static void paging32E_init_context(struct kvm_vcpu *vcpu, in paging32E_init_context() argument
3943 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); in paging32E_init_context()
3946 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) in init_kvm_tdp_mmu() argument
3948 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3951 context->base_role.smm = is_smm(vcpu); in init_kvm_tdp_mmu()
3964 if (!is_paging(vcpu)) { in init_kvm_tdp_mmu()
3968 } else if (is_long_mode(vcpu)) { in init_kvm_tdp_mmu()
3969 context->nx = is_nx(vcpu); in init_kvm_tdp_mmu()
3971 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3973 } else if (is_pae(vcpu)) { in init_kvm_tdp_mmu()
3974 context->nx = is_nx(vcpu); in init_kvm_tdp_mmu()
3976 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3981 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3985 update_permission_bitmask(vcpu, context, false); in init_kvm_tdp_mmu()
3986 update_last_pte_bitmap(vcpu, context); in init_kvm_tdp_mmu()
3987 reset_tdp_shadow_zero_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3990 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) in kvm_init_shadow_mmu() argument
3992 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); in kvm_init_shadow_mmu()
3993 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); in kvm_init_shadow_mmu()
3994 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
3998 if (!is_paging(vcpu)) in kvm_init_shadow_mmu()
3999 nonpaging_init_context(vcpu, context); in kvm_init_shadow_mmu()
4000 else if (is_long_mode(vcpu)) in kvm_init_shadow_mmu()
4001 paging64_init_context(vcpu, context); in kvm_init_shadow_mmu()
4002 else if (is_pae(vcpu)) in kvm_init_shadow_mmu()
4003 paging32E_init_context(vcpu, context); in kvm_init_shadow_mmu()
4005 paging32_init_context(vcpu, context); in kvm_init_shadow_mmu()
4007 context->base_role.nxe = is_nx(vcpu); in kvm_init_shadow_mmu()
4008 context->base_role.cr4_pae = !!is_pae(vcpu); in kvm_init_shadow_mmu()
4009 context->base_role.cr0_wp = is_write_protection(vcpu); in kvm_init_shadow_mmu()
4011 = smep && !is_write_protection(vcpu); in kvm_init_shadow_mmu()
4013 = smap && !is_write_protection(vcpu); in kvm_init_shadow_mmu()
4014 context->base_role.smm = is_smm(vcpu); in kvm_init_shadow_mmu()
4015 reset_shadow_zero_bits_mask(vcpu, context); in kvm_init_shadow_mmu()
4019 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) in kvm_init_shadow_ept_mmu() argument
4021 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
4037 update_permission_bitmask(vcpu, context, true); in kvm_init_shadow_ept_mmu()
4038 reset_rsvds_bits_mask_ept(vcpu, context, execonly); in kvm_init_shadow_ept_mmu()
4039 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); in kvm_init_shadow_ept_mmu()
4043 static void init_kvm_softmmu(struct kvm_vcpu *vcpu) in init_kvm_softmmu() argument
4045 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
4047 kvm_init_shadow_mmu(vcpu); in init_kvm_softmmu()
4054 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) in init_kvm_nested_mmu() argument
4056 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
4068 if (!is_paging(vcpu)) { in init_kvm_nested_mmu()
4072 } else if (is_long_mode(vcpu)) { in init_kvm_nested_mmu()
4073 g_context->nx = is_nx(vcpu); in init_kvm_nested_mmu()
4075 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
4077 } else if (is_pae(vcpu)) { in init_kvm_nested_mmu()
4078 g_context->nx = is_nx(vcpu); in init_kvm_nested_mmu()
4080 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
4085 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
4089 update_permission_bitmask(vcpu, g_context, false); in init_kvm_nested_mmu()
4090 update_last_pte_bitmap(vcpu, g_context); in init_kvm_nested_mmu()
4093 static void init_kvm_mmu(struct kvm_vcpu *vcpu) in init_kvm_mmu() argument
4095 if (mmu_is_nested(vcpu)) in init_kvm_mmu()
4096 init_kvm_nested_mmu(vcpu); in init_kvm_mmu()
4098 init_kvm_tdp_mmu(vcpu); in init_kvm_mmu()
4100 init_kvm_softmmu(vcpu); in init_kvm_mmu()
4103 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) in kvm_mmu_reset_context() argument
4105 kvm_mmu_unload(vcpu); in kvm_mmu_reset_context()
4106 init_kvm_mmu(vcpu); in kvm_mmu_reset_context()
4110 int kvm_mmu_load(struct kvm_vcpu *vcpu) in kvm_mmu_load() argument
4114 r = mmu_topup_memory_caches(vcpu); in kvm_mmu_load()
4117 r = mmu_alloc_roots(vcpu); in kvm_mmu_load()
4118 kvm_mmu_sync_roots(vcpu); in kvm_mmu_load()
4122 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4128 void kvm_mmu_unload(struct kvm_vcpu *vcpu) in kvm_mmu_unload() argument
4130 mmu_free_roots(vcpu); in kvm_mmu_unload()
4131 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4135 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, in mmu_pte_write_new_pte() argument
4140 ++vcpu->kvm->stat.mmu_pde_zapped; in mmu_pte_write_new_pte()
4144 ++vcpu->kvm->stat.mmu_pte_updated; in mmu_pte_write_new_pte()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4161 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, in mmu_pte_write_flush_tlb() argument
4168 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_pte_write_flush_tlb()
4170 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in mmu_pte_write_flush_tlb()
4173 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, in mmu_pte_write_fetch_gpte() argument
4184 if (is_pae(vcpu) && *bytes == 4) { in mmu_pte_write_fetch_gpte()
4188 r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8); in mmu_pte_write_fetch_gpte()
4284 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, in kvm_mmu_pte_write() argument
4306 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4313 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); in kvm_mmu_pte_write()
4320 mmu_topup_memory_caches(vcpu); in kvm_mmu_pte_write()
4322 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4323 ++vcpu->kvm->stat.mmu_pte_write; in kvm_mmu_pte_write()
4324 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); in kvm_mmu_pte_write()
4326 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
4329 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in kvm_mmu_pte_write()
4331 ++vcpu->kvm->stat.mmu_flooded; in kvm_mmu_pte_write()
4342 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4344 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4345 & mask.word) && rmap_can_add(vcpu)) in kvm_mmu_pte_write()
4346 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4352 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); in kvm_mmu_pte_write()
4353 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_pte_write()
4354 kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); in kvm_mmu_pte_write()
4355 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4358 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
4363 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4366 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
4368 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); in kvm_mmu_unprotect_page_virt()
4374 static void make_mmu_pages_available(struct kvm_vcpu *vcpu) in make_mmu_pages_available() argument
4378 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) in make_mmu_pages_available()
4381 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { in make_mmu_pages_available()
4382 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
4385 ++vcpu->kvm->stat.mmu_recycled; in make_mmu_pages_available()
4387 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
4390 static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr) in is_mmio_page_fault() argument
4392 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4393 return vcpu_match_mmio_gpa(vcpu, addr); in is_mmio_page_fault()
4395 return vcpu_match_mmio_gva(vcpu, addr); in is_mmio_page_fault()
4398 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, in kvm_mmu_page_fault() argument
4404 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4413 if (is_mmio_page_fault(vcpu, cr2)) in kvm_mmu_page_fault()
4416 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); in kvm_mmu_page_fault()
4422 ++vcpu->stat.mmio_exits; in kvm_mmu_page_fault()
4434 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_invlpg() argument
4436 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4437 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_mmu_invlpg()
4438 ++vcpu->stat.invlpg; in kvm_mmu_invlpg()
4454 static void free_mmu_pages(struct kvm_vcpu *vcpu) in free_mmu_pages() argument
4456 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4457 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4458 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4461 static int alloc_mmu_pages(struct kvm_vcpu *vcpu) in alloc_mmu_pages() argument
4475 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4477 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4482 int kvm_mmu_create(struct kvm_vcpu *vcpu) in kvm_mmu_create() argument
4484 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4485 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4486 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
4487 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; in kvm_mmu_create()
4489 return alloc_mmu_pages(vcpu); in kvm_mmu_create()
4492 void kvm_mmu_setup(struct kvm_vcpu *vcpu) in kvm_mmu_setup() argument
4494 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
4496 init_kvm_mmu(vcpu); in kvm_mmu_setup()
4961 void kvm_mmu_destroy(struct kvm_vcpu *vcpu) in kvm_mmu_destroy() argument
4963 kvm_mmu_unload(vcpu); in kvm_mmu_destroy()
4964 free_mmu_pages(vcpu); in kvm_mmu_destroy()
4965 mmu_free_memory_caches(vcpu); in kvm_mmu_destroy()
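
A recurring shape among the matches above: fast_page_fault() and walk_shadow_page_get_mmio_spte() bracket a lockless traversal of the shadow page tables between walk_shadow_page_lockless_begin() and walk_shadow_page_lockless_end(), which switch vcpu->mode between READING_SHADOW_PAGE_TABLES and OUTSIDE_GUEST_MODE. The sketch below is only an illustration of that caller shape assembled from the matched fragments; inspect_spte_lockless() is a hypothetical name, not a function in the file, and is_shadow_present_pte() is taken from the same file but does not appear in the vcpu match list.

	/*
	 * Sketch of the lockless shadow-walk bracket, assuming the helpers
	 * matched above (walk_shadow_page_lockless_begin/end and
	 * for_each_shadow_entry_lockless). Not part of the listed source.
	 */
	static u64 inspect_spte_lockless(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;
		u64 spte = 0ull;

		/* Callers in the listing bail out if no root is loaded. */
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return spte;

		/* vcpu->mode = READING_SHADOW_PAGE_TABLES */
		walk_shadow_page_lockless_begin(vcpu);

		for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
			if (!is_shadow_present_pte(spte))
				break;

		/* vcpu->mode = OUTSIDE_GUEST_MODE */
		walk_shadow_page_lockless_end(vcpu);

		return spte;
	}

The same bracket protects the shadow pages from being freed under the walker without taking mmu_lock, which is why both fast_page_fault() and the MMIO-spte walk in the matches above use it rather than spin_lock(&vcpu->kvm->mmu_lock).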