Lines matching refs: vcpu
179 static void mmu_free_roots(struct kvm_vcpu *vcpu);
299 static int is_nx(struct kvm_vcpu *vcpu) in is_nx() argument
301 return vcpu->arch.efer & EFER_NX; in is_nx()
632 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_begin() argument
639 vcpu->mode = READING_SHADOW_PAGE_TABLES; in walk_shadow_page_lockless_begin()
647 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_end() argument
655 vcpu->mode = OUTSIDE_GUEST_MODE; in walk_shadow_page_lockless_end()
709 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) in mmu_topup_memory_caches() argument
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); in mmu_topup_memory_caches()
720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
726 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) in mmu_free_memory_caches() argument
728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_free_memory_caches()
730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); in mmu_free_memory_caches()
731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_free_memory_caches()
744 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) in mmu_alloc_pte_list_desc() argument
746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_alloc_pte_list_desc()
850 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
855 slot = gfn_to_memslot(vcpu->kvm, gfn); in gfn_to_memslot_dirty_bitmap()
863 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) in mapping_level_dirty_bitmap() argument
865 return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); in mapping_level_dirty_bitmap()
868 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) in mapping_level() argument
872 host_level = host_mapping_level(vcpu->kvm, large_gfn); in mapping_level()
880 if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) in mapping_level()
898 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
909 desc = mmu_alloc_pte_list_desc(vcpu); in pte_list_add()
922 desc->more = mmu_alloc_pte_list_desc(vcpu); in pte_list_add()
1030 static bool rmap_can_add(struct kvm_vcpu *vcpu) in rmap_can_add() argument
1034 cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_can_add()
1038 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_add()
1046 return pte_list_add(vcpu, spte, rmapp); in rmap_add()
1142 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) in drop_large_spte() argument
1144 if (__drop_large_spte(vcpu->kvm, sptep)) in drop_large_spte()
1145 kvm_flush_remote_tlbs(vcpu->kvm); in drop_large_spte()
1543 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_recycle()
1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1553 kvm_flush_remote_tlbs(vcpu->kvm); in rmap_recycle()
1630 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, in mmu_page_add_parent_pte() argument
1636 pte_list_add(vcpu, parent_pte, &sp->parent_ptes); in mmu_page_add_parent_pte()
1652 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, in kvm_mmu_alloc_page() argument
1657 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); in kvm_mmu_alloc_page()
1658 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1660 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1668 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1670 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_alloc_page()
1671 kvm_mod_used_mmu_pages(vcpu->kvm, +1); in kvm_mmu_alloc_page()
1695 static int nonpaging_sync_page(struct kvm_vcpu *vcpu, in nonpaging_sync_page() argument
1701 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in nonpaging_invlpg() argument
1705 static void nonpaging_update_pte(struct kvm_vcpu *vcpu, in nonpaging_update_pte() argument
1825 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __kvm_sync_page() argument
1828 if (sp->role.cr4_pae != !!is_pae(vcpu)) { in __kvm_sync_page()
1829 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1834 kvm_unlink_unsync_page(vcpu->kvm, sp); in __kvm_sync_page()
1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1837 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1841 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in __kvm_sync_page()
1845 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, in kvm_sync_page_transient() argument
1851 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); in kvm_sync_page_transient()
1853 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_page_transient()
1861 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } in kvm_mmu_audit() argument
1865 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page() argument
1868 return __kvm_sync_page(vcpu, sp, invalid_list, true); in kvm_sync_page()
1872 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages() argument
1878 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
1883 kvm_unlink_unsync_page(vcpu->kvm, s); in kvm_sync_pages()
1884 if ((s->role.cr4_pae != !!is_pae(vcpu)) || in kvm_sync_pages()
1885 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
1886 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); in kvm_sync_pages()
1892 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_pages()
1894 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_sync_pages()
1956 static void mmu_sync_children(struct kvm_vcpu *vcpu, in mmu_sync_children() argument
1970 protected |= rmap_write_protect(vcpu->kvm, sp->gfn); in mmu_sync_children()
1973 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_sync_children()
1976 kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
1979 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_sync_children()
1980 cond_resched_lock(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2010 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, in kvm_mmu_get_page() argument
2023 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2029 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2035 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2036 if (is_obsolete_sp(vcpu->kvm, sp)) in kvm_mmu_get_page()
2045 if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) in kvm_mmu_get_page()
2048 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_get_page()
2050 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); in kvm_mmu_get_page()
2059 ++vcpu->kvm->stat.mmu_cache_miss; in kvm_mmu_get_page()
2060 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); in kvm_mmu_get_page()
2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2068 if (rmap_write_protect(vcpu->kvm, gfn)) in kvm_mmu_get_page()
2069 kvm_flush_remote_tlbs(vcpu->kvm); in kvm_mmu_get_page()
2071 kvm_sync_pages(vcpu, gfn); in kvm_mmu_get_page()
2073 account_shadowed(vcpu->kvm, gfn); in kvm_mmu_get_page()
2075 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2082 struct kvm_vcpu *vcpu, u64 addr) in shadow_walk_init() argument
2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2086 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2090 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
2146 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2164 kvm_flush_remote_tlbs(vcpu->kvm); in validate_direct_spte()
2459 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_get_guest_memory_type() argument
2463 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, in kvm_get_guest_memory_type()
2471 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_unsync_page() argument
2474 ++vcpu->kvm->stat.mmu_unsync; in __kvm_unsync_page()
2480 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages() argument
2484 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2488 __kvm_unsync_page(vcpu, s); in kvm_unsync_pages()
2492 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect() argument
2498 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2509 kvm_unsync_pages(vcpu, gfn); in mmu_need_write_protect()
2513 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in set_spte() argument
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) in set_spte()
2539 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2558 has_wrprotected_page(vcpu->kvm, gfn, level)) in set_spte()
2572 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { in set_spte()
2582 mark_page_dirty(vcpu->kvm, gfn); in set_spte()
2588 kvm_flush_remote_tlbs(vcpu->kvm); in set_spte()
2593 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in mmu_set_spte() argument
2616 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2620 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2621 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2626 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2630 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in mmu_set_spte()
2642 ++vcpu->kvm->stat.lpages; in mmu_set_spte()
2646 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2648 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2655 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn() argument
2660 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); in pte_prefetch_gfn_to_pfn()
2667 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, in direct_pte_prefetch_many() argument
2677 if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK)) in direct_pte_prefetch_many()
2680 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); in direct_pte_prefetch_many()
2685 mmu_set_spte(vcpu, start, access, 0, NULL, in direct_pte_prefetch_many()
2692 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, in __direct_pte_prefetch() argument
2707 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2715 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
2732 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
2735 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, in __direct_map() argument
2744 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2747 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { in __direct_map()
2749 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, in __direct_map()
2752 direct_pte_prefetch(vcpu, iterator.sptep); in __direct_map()
2753 ++vcpu->stat.pf_fixed; in __direct_map()
2757 drop_large_spte(vcpu, iterator.sptep); in __direct_map()
2763 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, in __direct_map()
2786 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2798 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); in kvm_handle_bad_page()
2805 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, in transparent_hugepage_adjust() argument
2821 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { in transparent_hugepage_adjust()
2846 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2853 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2858 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2887 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in fast_pf_fix_direct_spte() argument
2913 mark_page_dirty(vcpu->kvm, gfn); in fast_pf_fix_direct_spte()
2923 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, in fast_page_fault() argument
2931 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
2937 walk_shadow_page_lockless_begin(vcpu); in fast_page_fault()
2938 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2991 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
2993 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
2995 walk_shadow_page_lockless_end(vcpu); in fast_page_fault()
3000 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3002 static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
3004 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, in nonpaging_map() argument
3014 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); in nonpaging_map()
3016 level = mapping_level(vcpu, gfn); in nonpaging_map()
3029 if (fast_page_fault(vcpu, v, level, error_code)) in nonpaging_map()
3032 mmu_seq = vcpu->kvm->mmu_notifier_seq; in nonpaging_map()
3035 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3038 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3041 spin_lock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3042 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in nonpaging_map()
3044 make_mmu_pages_available(vcpu); in nonpaging_map()
3046 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3047 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3049 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3055 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3061 static void mmu_free_roots(struct kvm_vcpu *vcpu) in mmu_free_roots() argument
3067 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3070 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3071 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3072 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3073 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3075 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3079 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in mmu_free_roots()
3080 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3082 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3083 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3087 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3089 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3096 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in mmu_free_roots()
3099 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3101 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3102 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3103 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3106 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) in mmu_check_root() argument
3110 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { in mmu_check_root()
3111 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in mmu_check_root()
3118 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) in mmu_alloc_direct_roots() argument
3123 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3124 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3125 make_mmu_pages_available(vcpu); in mmu_alloc_direct_roots()
3126 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, in mmu_alloc_direct_roots()
3129 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3130 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3131 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3133 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3136 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3137 make_mmu_pages_available(vcpu); in mmu_alloc_direct_roots()
3138 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), in mmu_alloc_direct_roots()
3144 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3145 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3147 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
3154 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) in mmu_alloc_shadow_roots() argument
3161 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3163 if (mmu_check_root(vcpu, root_gfn)) in mmu_alloc_shadow_roots()
3170 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3171 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3175 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3176 make_mmu_pages_available(vcpu); in mmu_alloc_shadow_roots()
3177 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, in mmu_alloc_shadow_roots()
3181 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3182 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3192 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3196 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3199 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3200 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3202 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3206 if (mmu_check_root(vcpu, root_gfn)) in mmu_alloc_shadow_roots()
3209 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3210 make_mmu_pages_available(vcpu); in mmu_alloc_shadow_roots()
3211 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, in mmu_alloc_shadow_roots()
3216 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3220 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3227 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3239 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3241 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3244 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3250 static int mmu_alloc_roots(struct kvm_vcpu *vcpu) in mmu_alloc_roots() argument
3252 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3253 return mmu_alloc_direct_roots(vcpu); in mmu_alloc_roots()
3255 return mmu_alloc_shadow_roots(vcpu); in mmu_alloc_roots()
3258 static void mmu_sync_roots(struct kvm_vcpu *vcpu) in mmu_sync_roots() argument
3263 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3266 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3269 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); in mmu_sync_roots()
3270 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); in mmu_sync_roots()
3271 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3272 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3274 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
3275 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); in mmu_sync_roots()
3279 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
3284 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
3287 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); in mmu_sync_roots()
3290 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) in kvm_mmu_sync_roots() argument
3292 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3293 mmu_sync_roots(vcpu); in kvm_mmu_sync_roots()
3294 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3298 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, in nonpaging_gva_to_gpa() argument
3306 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, in nonpaging_gva_to_gpa_nested() argument
3312 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); in nonpaging_gva_to_gpa_nested()
3315 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) in quickly_check_mmio_pf() argument
3318 return vcpu_match_mmio_gpa(vcpu, addr); in quickly_check_mmio_pf()
3320 return vcpu_match_mmio_gva(vcpu, addr); in quickly_check_mmio_pf()
3323 static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) in walk_shadow_page_get_mmio_spte() argument
3328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3331 walk_shadow_page_lockless_begin(vcpu); in walk_shadow_page_get_mmio_spte()
3332 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) in walk_shadow_page_get_mmio_spte()
3335 walk_shadow_page_lockless_end(vcpu); in walk_shadow_page_get_mmio_spte()
3340 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) in handle_mmio_page_fault_common() argument
3344 if (quickly_check_mmio_pf(vcpu, addr, direct)) in handle_mmio_page_fault_common()
3347 spte = walk_shadow_page_get_mmio_spte(vcpu, addr); in handle_mmio_page_fault_common()
3353 if (!check_mmio_spte(vcpu->kvm, spte)) in handle_mmio_page_fault_common()
3360 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault_common()
3372 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, in handle_mmio_page_fault() argument
3377 ret = handle_mmio_page_fault_common(vcpu, addr, direct); in handle_mmio_page_fault()
3382 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, in nonpaging_page_fault() argument
3391 r = handle_mmio_page_fault(vcpu, gva, error_code, true); in nonpaging_page_fault()
3397 r = mmu_topup_memory_caches(vcpu); in nonpaging_page_fault()
3401 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3405 return nonpaging_map(vcpu, gva & PAGE_MASK, in nonpaging_page_fault()
3409 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3413 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in kvm_arch_setup_async_pf()
3415 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3416 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); in kvm_arch_setup_async_pf()
3421 static bool can_do_async_pf(struct kvm_vcpu *vcpu) in can_do_async_pf() argument
3423 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || in can_do_async_pf()
3424 kvm_event_needs_reinjection(vcpu))) in can_do_async_pf()
3427 return kvm_x86_ops->interrupt_allowed(vcpu); in can_do_async_pf()
3430 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf() argument
3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); in try_async_pf()
3440 if (!prefault && can_do_async_pf(vcpu)) { in try_async_pf()
3442 if (kvm_find_async_pf_gfn(vcpu, gfn)) { in try_async_pf()
3444 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in try_async_pf()
3446 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); in try_async_pf()
3455 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, in tdp_page_fault() argument
3467 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3470 r = handle_mmio_page_fault(vcpu, gpa, error_code, true); in tdp_page_fault()
3476 r = mmu_topup_memory_caches(vcpu); in tdp_page_fault()
3480 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); in tdp_page_fault()
3482 level = mapping_level(vcpu, gfn); in tdp_page_fault()
3487 if (fast_page_fault(vcpu, gpa, level, error_code)) in tdp_page_fault()
3490 mmu_seq = vcpu->kvm->mmu_notifier_seq; in tdp_page_fault()
3493 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3496 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3499 spin_lock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3500 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in tdp_page_fault()
3502 make_mmu_pages_available(vcpu); in tdp_page_fault()
3504 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3505 r = __direct_map(vcpu, gpa, write, map_writable, in tdp_page_fault()
3507 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3512 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3517 static void nonpaging_init_context(struct kvm_vcpu *vcpu, in nonpaging_init_context() argument
3532 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu) in kvm_mmu_new_cr3() argument
3534 mmu_free_roots(vcpu); in kvm_mmu_new_cr3()
3537 static unsigned long get_cr3(struct kvm_vcpu *vcpu) in get_cr3() argument
3539 return kvm_read_cr3(vcpu); in get_cr3()
3542 static void inject_page_fault(struct kvm_vcpu *vcpu, in inject_page_fault() argument
3545 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3587 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, in reset_rsvds_bits_mask() argument
3590 int maxphyaddr = cpuid_maxphyaddr(vcpu); in reset_rsvds_bits_mask()
3599 if (!guest_cpuid_has_gbpages(vcpu)) in reset_rsvds_bits_mask()
3606 if (guest_cpuid_is_amd(vcpu)) in reset_rsvds_bits_mask()
3616 if (!is_pse(vcpu)) { in reset_rsvds_bits_mask()
3662 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, in reset_rsvds_bits_mask_ept() argument
3665 int maxphyaddr = cpuid_maxphyaddr(vcpu); in reset_rsvds_bits_mask_ept()
3694 static void update_permission_bitmask(struct kvm_vcpu *vcpu, in update_permission_bitmask() argument
3701 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); in update_permission_bitmask()
3702 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); in update_permission_bitmask()
3724 w |= !is_write_protection(vcpu) && !uf; in update_permission_bitmask()
3759 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_pte_bitmap() argument
3771 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) in update_last_pte_bitmap()
3777 static void paging64_init_context_common(struct kvm_vcpu *vcpu, in paging64_init_context_common() argument
3781 context->nx = is_nx(vcpu); in paging64_init_context_common()
3784 reset_rsvds_bits_mask(vcpu, context); in paging64_init_context_common()
3785 update_permission_bitmask(vcpu, context, false); in paging64_init_context_common()
3786 update_last_pte_bitmap(vcpu, context); in paging64_init_context_common()
3788 MMU_WARN_ON(!is_pae(vcpu)); in paging64_init_context_common()
3799 static void paging64_init_context(struct kvm_vcpu *vcpu, in paging64_init_context() argument
3802 paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); in paging64_init_context()
3805 static void paging32_init_context(struct kvm_vcpu *vcpu, in paging32_init_context() argument
3811 reset_rsvds_bits_mask(vcpu, context); in paging32_init_context()
3812 update_permission_bitmask(vcpu, context, false); in paging32_init_context()
3813 update_last_pte_bitmap(vcpu, context); in paging32_init_context()
3825 static void paging32E_init_context(struct kvm_vcpu *vcpu, in paging32E_init_context() argument
3828 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); in paging32E_init_context()
3831 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) in init_kvm_tdp_mmu() argument
3833 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3848 if (!is_paging(vcpu)) { in init_kvm_tdp_mmu()
3852 } else if (is_long_mode(vcpu)) { in init_kvm_tdp_mmu()
3853 context->nx = is_nx(vcpu); in init_kvm_tdp_mmu()
3855 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3857 } else if (is_pae(vcpu)) { in init_kvm_tdp_mmu()
3858 context->nx = is_nx(vcpu); in init_kvm_tdp_mmu()
3860 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3865 reset_rsvds_bits_mask(vcpu, context); in init_kvm_tdp_mmu()
3869 update_permission_bitmask(vcpu, context, false); in init_kvm_tdp_mmu()
3870 update_last_pte_bitmap(vcpu, context); in init_kvm_tdp_mmu()
3873 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) in kvm_init_shadow_mmu() argument
3875 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); in kvm_init_shadow_mmu()
3876 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); in kvm_init_shadow_mmu()
3877 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
3881 if (!is_paging(vcpu)) in kvm_init_shadow_mmu()
3882 nonpaging_init_context(vcpu, context); in kvm_init_shadow_mmu()
3883 else if (is_long_mode(vcpu)) in kvm_init_shadow_mmu()
3884 paging64_init_context(vcpu, context); in kvm_init_shadow_mmu()
3885 else if (is_pae(vcpu)) in kvm_init_shadow_mmu()
3886 paging32E_init_context(vcpu, context); in kvm_init_shadow_mmu()
3888 paging32_init_context(vcpu, context); in kvm_init_shadow_mmu()
3890 context->base_role.nxe = is_nx(vcpu); in kvm_init_shadow_mmu()
3891 context->base_role.cr4_pae = !!is_pae(vcpu); in kvm_init_shadow_mmu()
3892 context->base_role.cr0_wp = is_write_protection(vcpu); in kvm_init_shadow_mmu()
3894 = smep && !is_write_protection(vcpu); in kvm_init_shadow_mmu()
3896 = smap && !is_write_protection(vcpu); in kvm_init_shadow_mmu()
3900 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) in kvm_init_shadow_ept_mmu() argument
3902 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
3918 update_permission_bitmask(vcpu, context, true); in kvm_init_shadow_ept_mmu()
3919 reset_rsvds_bits_mask_ept(vcpu, context, execonly); in kvm_init_shadow_ept_mmu()
3923 static void init_kvm_softmmu(struct kvm_vcpu *vcpu) in init_kvm_softmmu() argument
3925 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
3927 kvm_init_shadow_mmu(vcpu); in init_kvm_softmmu()
3934 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) in init_kvm_nested_mmu() argument
3936 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
3948 if (!is_paging(vcpu)) { in init_kvm_nested_mmu()
3952 } else if (is_long_mode(vcpu)) { in init_kvm_nested_mmu()
3953 g_context->nx = is_nx(vcpu); in init_kvm_nested_mmu()
3955 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
3957 } else if (is_pae(vcpu)) { in init_kvm_nested_mmu()
3958 g_context->nx = is_nx(vcpu); in init_kvm_nested_mmu()
3960 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
3965 reset_rsvds_bits_mask(vcpu, g_context); in init_kvm_nested_mmu()
3969 update_permission_bitmask(vcpu, g_context, false); in init_kvm_nested_mmu()
3970 update_last_pte_bitmap(vcpu, g_context); in init_kvm_nested_mmu()
3973 static void init_kvm_mmu(struct kvm_vcpu *vcpu) in init_kvm_mmu() argument
3975 if (mmu_is_nested(vcpu)) in init_kvm_mmu()
3976 init_kvm_nested_mmu(vcpu); in init_kvm_mmu()
3978 init_kvm_tdp_mmu(vcpu); in init_kvm_mmu()
3980 init_kvm_softmmu(vcpu); in init_kvm_mmu()
3983 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) in kvm_mmu_reset_context() argument
3985 kvm_mmu_unload(vcpu); in kvm_mmu_reset_context()
3986 init_kvm_mmu(vcpu); in kvm_mmu_reset_context()
3990 int kvm_mmu_load(struct kvm_vcpu *vcpu) in kvm_mmu_load() argument
3994 r = mmu_topup_memory_caches(vcpu); in kvm_mmu_load()
3997 r = mmu_alloc_roots(vcpu); in kvm_mmu_load()
3998 kvm_mmu_sync_roots(vcpu); in kvm_mmu_load()
4002 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4008 void kvm_mmu_unload(struct kvm_vcpu *vcpu) in kvm_mmu_unload() argument
4010 mmu_free_roots(vcpu); in kvm_mmu_unload()
4011 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4015 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, in mmu_pte_write_new_pte() argument
4020 ++vcpu->kvm->stat.mmu_pde_zapped; in mmu_pte_write_new_pte()
4024 ++vcpu->kvm->stat.mmu_pte_updated; in mmu_pte_write_new_pte()
4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4041 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, in mmu_pte_write_flush_tlb() argument
4048 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_pte_write_flush_tlb()
4050 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in mmu_pte_write_flush_tlb()
4053 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, in mmu_pte_write_fetch_gpte() argument
4064 if (is_pae(vcpu) && *bytes == 4) { in mmu_pte_write_fetch_gpte()
4068 r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8); in mmu_pte_write_fetch_gpte()
4164 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, in kvm_mmu_pte_write() argument
4185 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4192 gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); in kvm_mmu_pte_write()
4199 mmu_topup_memory_caches(vcpu); in kvm_mmu_pte_write()
4201 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4202 ++vcpu->kvm->stat.mmu_pte_write; in kvm_mmu_pte_write()
4203 kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); in kvm_mmu_pte_write()
4205 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
4208 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in kvm_mmu_pte_write()
4210 ++vcpu->kvm->stat.mmu_flooded; in kvm_mmu_pte_write()
4221 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4223 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4224 & mask.word) && rmap_can_add(vcpu)) in kvm_mmu_pte_write()
4225 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4231 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); in kvm_mmu_pte_write()
4232 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_pte_write()
4233 kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); in kvm_mmu_pte_write()
4234 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4237 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
4242 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4245 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
4247 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); in kvm_mmu_unprotect_page_virt()
4253 static void make_mmu_pages_available(struct kvm_vcpu *vcpu) in make_mmu_pages_available() argument
4257 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) in make_mmu_pages_available()
4260 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { in make_mmu_pages_available()
4261 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
4264 ++vcpu->kvm->stat.mmu_recycled; in make_mmu_pages_available()
4266 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
4269 static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr) in is_mmio_page_fault() argument
4271 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4272 return vcpu_match_mmio_gpa(vcpu, addr); in is_mmio_page_fault()
4274 return vcpu_match_mmio_gva(vcpu, addr); in is_mmio_page_fault()
4277 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, in kvm_mmu_page_fault() argument
4283 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4292 if (is_mmio_page_fault(vcpu, cr2)) in kvm_mmu_page_fault()
4295 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); in kvm_mmu_page_fault()
4301 ++vcpu->stat.mmio_exits; in kvm_mmu_page_fault()
4313 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_invlpg() argument
4315 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4316 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_mmu_invlpg()
4317 ++vcpu->stat.invlpg; in kvm_mmu_invlpg()
4333 static void free_mmu_pages(struct kvm_vcpu *vcpu) in free_mmu_pages() argument
4335 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4336 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4337 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4340 static int alloc_mmu_pages(struct kvm_vcpu *vcpu) in alloc_mmu_pages() argument
4354 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4356 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4361 int kvm_mmu_create(struct kvm_vcpu *vcpu) in kvm_mmu_create() argument
4363 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4364 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4365 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
4366 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; in kvm_mmu_create()
4368 return alloc_mmu_pages(vcpu); in kvm_mmu_create()
4371 void kvm_mmu_setup(struct kvm_vcpu *vcpu) in kvm_mmu_setup() argument
4373 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
4375 init_kvm_mmu(vcpu); in kvm_mmu_setup()
4840 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) in kvm_mmu_get_spte_hierarchy() argument
4846 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in kvm_mmu_get_spte_hierarchy()
4849 walk_shadow_page_lockless_begin(vcpu); in kvm_mmu_get_spte_hierarchy()
4850 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { in kvm_mmu_get_spte_hierarchy()
4856 walk_shadow_page_lockless_end(vcpu); in kvm_mmu_get_spte_hierarchy()
4862 void kvm_mmu_destroy(struct kvm_vcpu *vcpu) in kvm_mmu_destroy() argument
4864 kvm_mmu_unload(vcpu); in kvm_mmu_destroy()
4865 free_mmu_pages(vcpu); in kvm_mmu_destroy()
4866 mmu_free_memory_caches(vcpu); in kvm_mmu_destroy()
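
A recurring pattern in the lines above is that per-vcpu object caches are topped up outside the MMU lock (mmu_topup_memory_caches) and then drained with non-failing allocations while the lock is held (mmu_memory_cache_alloc in rmap_add, kvm_mmu_alloc_page, and friends). The following is an illustrative userspace sketch of that pattern only; the names, sizes, and return conventions here are simplified assumptions and do not reproduce the real kvm_mmu_memory_cache API.

/*
 * Sketch: fill a small object cache while sleeping/failing is still
 * allowed, then allocate from it without failure under a lock.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_MIN 4	/* assumed minimum fill level before locking */
#define CACHE_MAX 8	/* assumed cache capacity */

struct obj_cache {
	int nobjs;
	void *objs[CACHE_MAX];
};

/* May allocate (and so fail); call this before taking any spinlock. */
static int cache_topup(struct obj_cache *c, size_t objsize)
{
	while (c->nobjs < CACHE_MIN) {
		void *obj = malloc(objsize);

		if (!obj)
			return -1;	/* -ENOMEM analogue */
		c->objs[c->nobjs++] = obj;
	}
	return 0;
}

/* Never allocates and never fails; caller guarantees a prior topup. */
static void *cache_alloc(struct obj_cache *c)
{
	return c->objs[--c->nobjs];
}

int main(void)
{
	struct obj_cache pte_desc_cache = { 0 };
	void *desc;

	if (cache_topup(&pte_desc_cache, 64))	/* outside the "mmu_lock" */
		return 1;

	/* ... spin_lock(&mmu_lock) would go here ... */
	desc = cache_alloc(&pte_desc_cache);	/* cannot fail at this point */
	/* ... spin_unlock(&mmu_lock) ... */

	printf("allocated %p, %d objects left in cache\n",
	       desc, pte_desc_cache.nobjs);

	free(desc);
	while (pte_desc_cache.nobjs)
		free(cache_alloc(&pte_desc_cache));
	return 0;
}

The design point this illustrates is the same one the listing hints at: allocation failures and sleeping are confined to the pre-lock topup phase, so the fault-handling paths that run under mmu_lock (rmap_add, kvm_mmu_get_page, __direct_map) can assume allocation always succeeds.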