Lines matching refs: kvm — a cross-reference listing (evidently over arch/x86/kvm/mmu.c): each entry gives the source line number, the matching line, and the enclosing function, with a trailing 'argument'/'local' tag where kvm appears as a parameter or a local variable on that line.
226 static unsigned int kvm_current_mmio_generation(struct kvm *kvm) in kvm_current_mmio_generation() argument
228 return kvm_memslots(kvm)->generation & MMIO_GEN_MASK; in kvm_current_mmio_generation()
231 static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
234 unsigned int gen = kvm_current_mmio_generation(kvm); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(kvm, sptep, gfn, access); in set_mmio_spte()
272 static bool check_mmio_spte(struct kvm *kvm, u64 spte) in check_mmio_spte() argument
276 kvm_gen = kvm_current_mmio_generation(kvm); in check_mmio_spte()
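
The cluster above (kvm_current_mmio_generation, mark_mmio_spte, set_mmio_spte, check_mmio_spte) tags MMIO shadow PTEs with the current memslots generation so that entries cached before a memslot update can be recognized as stale. The standalone sketch below illustrates only that idea; the bit layout (GEN_BITS, GEN_SHIFT) and the struct vm wrapper are invented for the example and do not match the kernel's real MMIO_GEN_* masks.

/* Illustrative sketch only: generation-tagged MMIO entries.
 * The bit layout (GEN_BITS, GEN_SHIFT) is invented for the example. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define GEN_BITS   8
#define GEN_MASK   ((1u << GEN_BITS) - 1)
#define GEN_SHIFT  52                      /* assumed free bits in the entry */

struct vm { unsigned int memslots_generation; };

static unsigned int current_mmio_generation(struct vm *vm)
{
	return vm->memslots_generation & GEN_MASK;
}

static uint64_t mark_mmio_entry(struct vm *vm, uint64_t gfn)
{
	/* Stamp the entry with the gfn plus the generation at creation time. */
	return gfn | ((uint64_t)current_mmio_generation(vm) << GEN_SHIFT);
}

static bool check_mmio_entry(struct vm *vm, uint64_t entry)
{
	unsigned int gen = (entry >> GEN_SHIFT) & GEN_MASK;

	/* A mismatch means the memslots changed since the entry was cached. */
	return gen == current_mmio_generation(vm);
}

int main(void)
{
	struct vm vm = { .memslots_generation = 41 };
	uint64_t e = mark_mmio_entry(&vm, 0x1234);

	printf("valid before update: %d\n", check_mmio_entry(&vm, e)); /* 1 */
	vm.memslots_generation++;   /* a memslot update bumps the generation */
	printf("valid after update:  %d\n", check_mmio_entry(&vm, e)); /* 0 */
	return 0;
}
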
784 static void account_shadowed(struct kvm *kvm, gfn_t gfn) in account_shadowed() argument
790 slot = gfn_to_memslot(kvm, gfn); in account_shadowed()
796 kvm->arch.indirect_shadow_pages++; in account_shadowed()
799 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) in unaccount_shadowed() argument
805 slot = gfn_to_memslot(kvm, gfn); in unaccount_shadowed()
812 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
815 static int has_wrprotected_page(struct kvm *kvm, in has_wrprotected_page() argument
822 slot = gfn_to_memslot(kvm, gfn); in has_wrprotected_page()
831 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level() argument
836 page_size = kvm_host_page_size(kvm, gfn); in host_mapping_level()
855 slot = gfn_to_memslot(vcpu->kvm, gfn); in gfn_to_memslot_dirty_bitmap()
872 host_level = host_mapping_level(vcpu->kvm, large_gfn); in mapping_level()
880 if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) in mapping_level()
1022 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) in gfn_to_rmap() argument
1026 slot = gfn_to_memslot(kvm, gfn); in gfn_to_rmap()
1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_add()
1049 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
1057 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); in rmap_remove()
1122 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1125 rmap_remove(kvm, sptep); in drop_spte()
1129 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) in __drop_large_spte() argument
1134 drop_spte(kvm, sptep); in __drop_large_spte()
1135 --kvm->stat.lpages; in __drop_large_spte()
1144 if (__drop_large_spte(vcpu->kvm, sptep)) in drop_large_spte()
1145 kvm_flush_remote_tlbs(vcpu->kvm); in drop_large_spte()
1161 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) in spte_write_protect() argument
1178 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, in __rmap_write_protect() argument
1188 flush |= spte_write_protect(kvm, sptep, pt_protect); in __rmap_write_protect()
1195 static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep) in spte_clear_dirty() argument
1206 static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_clear_dirty() argument
1215 flush |= spte_clear_dirty(kvm, sptep); in __rmap_clear_dirty()
1222 static bool spte_set_dirty(struct kvm *kvm, u64 *sptep) in spte_set_dirty() argument
1233 static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_set_dirty() argument
1242 flush |= spte_set_dirty(kvm, sptep); in __rmap_set_dirty()
1259 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked() argument
1268 __rmap_write_protect(kvm, rmapp, false); in kvm_mmu_write_protect_pt_masked()
1284 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_mmu_clear_dirty_pt_masked() argument
1293 __rmap_clear_dirty(kvm, rmapp); in kvm_mmu_clear_dirty_pt_masked()
1311 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
1316 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset, in kvm_arch_mmu_enable_log_dirty_pt_masked()
1319 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1322 static bool rmap_write_protect(struct kvm *kvm, u64 gfn) in rmap_write_protect() argument
1329 slot = gfn_to_memslot(kvm, gfn); in rmap_write_protect()
1334 write_protected |= __rmap_write_protect(kvm, rmapp, true); in rmap_write_protect()
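
__rmap_write_protect() and spte_write_protect() above walk the reverse map of a gfn, clear the writable bit in each shadow PTE, and report whether anything changed so the caller knows a remote TLB flush is due. A rough sketch of that walk-and-accumulate-flush shape follows; the singly linked rmap_node and the PTE_WRITABLE bit are simplifications, not the kernel's pte_list/spte encoding.

/* Sketch of a reverse-map write-protect walk; the data layout is invented. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PTE_WRITABLE (1ull << 1)   /* assumed writable bit for the example */

struct rmap_node {
	uint64_t *sptep;            /* one shadow PTE mapping the gfn */
	struct rmap_node *next;
};

static bool spte_write_protect(uint64_t *sptep)
{
	if (!(*sptep & PTE_WRITABLE))
		return false;           /* already read-only: nothing to flush */
	*sptep &= ~PTE_WRITABLE;
	return true;
}

/* Returns true if any SPTE changed, i.e. the caller must flush TLBs. */
static bool rmap_write_protect_walk(struct rmap_node *head)
{
	bool flush = false;

	for (struct rmap_node *n = head; n; n = n->next)
		flush |= spte_write_protect(n->sptep);
	return flush;
}

int main(void)
{
	uint64_t a = 0x1000 | PTE_WRITABLE, b = 0x2000;
	struct rmap_node n2 = { &b, NULL }, n1 = { &a, &n2 };

	printf("flush needed: %d\n", rmap_write_protect_walk(&n1)); /* 1 */
	printf("flush needed: %d\n", rmap_write_protect_walk(&n1)); /* 0: now clean */
	return 0;
}
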
1340 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_unmap_rmapp() argument
1353 drop_spte(kvm, sptep); in kvm_unmap_rmapp()
1360 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_set_pte_rmapp() argument
1382 drop_spte(kvm, sptep); in kvm_set_pte_rmapp()
1399 kvm_flush_remote_tlbs(kvm); in kvm_set_pte_rmapp()
1404 static int kvm_handle_hva_range(struct kvm *kvm, in kvm_handle_hva_range() argument
1408 int (*handler)(struct kvm *kvm, in kvm_handle_hva_range() argument
1420 slots = kvm_memslots(kvm); in kvm_handle_hva_range()
1455 ret |= handler(kvm, rmapp++, memslot, in kvm_handle_hva_range()
1463 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, in kvm_handle_hva() argument
1465 int (*handler)(struct kvm *kvm, unsigned long *rmapp, in kvm_handle_hva() argument
1470 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); in kvm_handle_hva()
1473 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) in kvm_unmap_hva() argument
1475 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); in kvm_unmap_hva()
1478 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_unmap_hva_range() argument
1480 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); in kvm_unmap_hva_range()
1483 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) in kvm_set_spte_hva() argument
1485 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); in kvm_set_spte_hva()
1488 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_age_rmapp() argument
1512 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_test_age_rmapp() argument
1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_recycle()
1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1553 kvm_flush_remote_tlbs(vcpu->kvm); in rmap_recycle()
1556 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva() argument
1573 kvm->mmu_notifier_seq++; in kvm_age_hva()
1574 return kvm_handle_hva_range(kvm, start, end, 0, in kvm_age_hva()
1578 return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); in kvm_age_hva()
1581 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) in kvm_test_age_hva() argument
1583 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); in kvm_test_age_hva()
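
kvm_handle_hva_range() above factors the MMU-notifier paths (unmap, set-pte, age, test-age) into one walker: it maps a host-virtual-address range onto the rmap heads it covers and applies a per-rmap handler, OR-ing the results, so kvm_unmap_hva_range(), kvm_age_hva() and friends differ only in the handler they pass. The sketch below mimics that callback dispatch; the flat per-page array is an invented stand-in for the real hva-to-memslot-to-rmap lookup.

/* Sketch of a range walker dispatching to per-entry handlers. */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define NPAGES     16

struct vm { int rmap[NPAGES]; };  /* toy "rmap" per guest page */

typedef int (*rmap_handler)(struct vm *vm, int *rmapp, unsigned long data);

static int handle_range(struct vm *vm, unsigned long start, unsigned long end,
			unsigned long data, rmap_handler handler)
{
	int ret = 0;

	for (unsigned long hva = start; hva < end; hva += 1ul << PAGE_SHIFT) {
		size_t idx = (hva >> PAGE_SHIFT) % NPAGES;

		ret |= handler(vm, &vm->rmap[idx], data);
	}
	return ret;
}

/* Two example handlers, mirroring how unmap vs. age share the walker. */
static int unmap_one(struct vm *vm, int *rmapp, unsigned long data)
{
	int had_mapping = *rmapp != 0;

	(void)vm; (void)data;
	*rmapp = 0;
	return had_mapping;
}

static int age_one(struct vm *vm, int *rmapp, unsigned long data)
{
	(void)vm; (void)data;
	return *rmapp != 0;          /* "young" if anything is still mapped */
}

int main(void)
{
	struct vm vm = { .rmap = { [3] = 1, [4] = 1 } };

	printf("young: %d\n", handle_range(&vm, 0x3000, 0x5000, 0, age_one));
	printf("unmapped something: %d\n",
	       handle_range(&vm, 0x0000, 0x10000, 0, unmap_one));
	printf("young: %d\n", handle_range(&vm, 0x3000, 0x5000, 0, age_one));
	return 0;
}
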
1608 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) in kvm_mod_used_mmu_pages() argument
1610 kvm->arch.n_used_mmu_pages += nr; in kvm_mod_used_mmu_pages()
1668 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1671 kvm_mod_used_mmu_pages(vcpu->kvm, +1); in kvm_mmu_alloc_page()
1792 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page() argument
1797 --kvm->stat.mmu_unsync; in kvm_unlink_unsync_page()
1800 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1802 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1829 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1834 kvm_unlink_unsync_page(vcpu->kvm, sp); in __kvm_sync_page()
1837 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1853 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_page_transient()
1878 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
1883 kvm_unlink_unsync_page(vcpu->kvm, s); in kvm_sync_pages()
1886 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); in kvm_sync_pages()
1892 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_pages()
1970 protected |= rmap_write_protect(vcpu->kvm, sp->gfn); in mmu_sync_children()
1973 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_sync_children()
1979 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_sync_children()
1980 cond_resched_lock(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2005 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp() argument
2007 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2035 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2036 if (is_obsolete_sp(vcpu->kvm, sp)) in kvm_mmu_get_page()
2059 ++vcpu->kvm->stat.mmu_cache_miss; in kvm_mmu_get_page()
2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2068 if (rmap_write_protect(vcpu->kvm, gfn)) in kvm_mmu_get_page()
2069 kvm_flush_remote_tlbs(vcpu->kvm); in kvm_mmu_get_page()
2073 account_shadowed(vcpu->kvm, gfn); in kvm_mmu_get_page()
2075 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2164 kvm_flush_remote_tlbs(vcpu->kvm); in validate_direct_spte()
2168 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte() argument
2177 drop_spte(kvm, spte); in mmu_page_zap_pte()
2179 --kvm->stat.lpages; in mmu_page_zap_pte()
2193 static void kvm_mmu_page_unlink_children(struct kvm *kvm, in kvm_mmu_page_unlink_children() argument
2199 mmu_page_zap_pte(kvm, sp, sp->spt + i); in kvm_mmu_page_unlink_children()
2207 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents() argument
2216 static int mmu_zap_unsync_children(struct kvm *kvm, in mmu_zap_unsync_children() argument
2232 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2242 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page() argument
2248 ++kvm->stat.mmu_shadow_zapped; in kvm_mmu_prepare_zap_page()
2249 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); in kvm_mmu_prepare_zap_page()
2250 kvm_mmu_page_unlink_children(kvm, sp); in kvm_mmu_prepare_zap_page()
2251 kvm_mmu_unlink_parents(kvm, sp); in kvm_mmu_prepare_zap_page()
2254 unaccount_shadowed(kvm, sp->gfn); in kvm_mmu_prepare_zap_page()
2257 kvm_unlink_unsync_page(kvm, sp); in kvm_mmu_prepare_zap_page()
2262 kvm_mod_used_mmu_pages(kvm, -1); in kvm_mmu_prepare_zap_page()
2264 list_move(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_prepare_zap_page()
2270 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp)) in kvm_mmu_prepare_zap_page()
2271 kvm_reload_remote_mmus(kvm); in kvm_mmu_prepare_zap_page()
2278 static void kvm_mmu_commit_zap_page(struct kvm *kvm, in kvm_mmu_commit_zap_page() argument
2296 kvm_flush_remote_tlbs(kvm); in kvm_mmu_commit_zap_page()
2304 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, in prepare_zap_oldest_mmu_page() argument
2309 if (list_empty(&kvm->arch.active_mmu_pages)) in prepare_zap_oldest_mmu_page()
2312 sp = list_entry(kvm->arch.active_mmu_pages.prev, in prepare_zap_oldest_mmu_page()
2314 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in prepare_zap_oldest_mmu_page()
2323 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages() argument
2327 spin_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2329 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2331 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
2332 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in kvm_mmu_change_mmu_pages()
2335 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_change_mmu_pages()
2336 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2339 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
2341 spin_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
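
kvm_mmu_change_mmu_pages() above enforces a ceiling: while n_used_mmu_pages exceeds the requested goal it zaps the oldest entry of active_mmu_pages, then records the resulting maximum. A compact sketch of that evict-until-under-goal loop, with plain counters standing in for the kernel's page list and zap/commit machinery:

/* Sketch: evict oldest entries until usage drops to the goal.
 * The counters below stand in for the kernel's active_mmu_pages list. */
#include <stdio.h>
#include <stdbool.h>

struct mmu_cache {
	int used;           /* n_used_mmu_pages */
	int max;            /* n_max_mmu_pages */
};

static bool zap_oldest(struct mmu_cache *c)
{
	if (c->used == 0)
		return false;   /* nothing left to reclaim */
	c->used--;          /* in the real code this zaps one shadow page */
	return true;
}

static void change_mmu_pages(struct mmu_cache *c, int goal)
{
	if (c->used > goal) {
		while (c->used > goal)
			if (!zap_oldest(c))
				break;
		goal = c->used;  /* record what we actually reached */
	}
	c->max = goal;
}

int main(void)
{
	struct mmu_cache c = { .used = 10, .max = 20 };

	change_mmu_pages(&c, 4);
	printf("used=%d max=%d\n", c.used, c.max);   /* used=4 max=4 */
	change_mmu_pages(&c, 8);
	printf("used=%d max=%d\n", c.used, c.max);   /* used=4 max=8 */
	return 0;
}
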
2344 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page() argument
2352 spin_lock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2353 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2357 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2359 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
2360 spin_unlock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2474 ++vcpu->kvm->stat.mmu_unsync; in __kvm_unsync_page()
2484 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2498 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) in set_spte()
2558 has_wrprotected_page(vcpu->kvm, gfn, level)) in set_spte()
2582 mark_page_dirty(vcpu->kvm, gfn); in set_spte()
2588 kvm_flush_remote_tlbs(vcpu->kvm); in set_spte()
2616 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2620 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2621 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2642 ++vcpu->kvm->stat.lpages; in mmu_set_spte()
2680 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); in direct_pte_prefetch_many()
2798 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); in kvm_handle_bad_page()
2821 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { in transparent_hugepage_adjust()
2913 mark_page_dirty(vcpu->kvm, gfn); in fast_pf_fix_direct_spte()
3032 mmu_seq = vcpu->kvm->mmu_notifier_seq; in nonpaging_map()
3041 spin_lock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3042 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in nonpaging_map()
3049 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3055 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3075 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3079 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in mmu_free_roots()
3080 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3082 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3087 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3096 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in mmu_free_roots()
3101 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3102 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3110 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { in mmu_check_root()
3124 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3129 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3136 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3144 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3175 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3181 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3209 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3216 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3292 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3294 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3353 if (!check_mmio_spte(vcpu->kvm, spte)) in handle_mmio_page_fault_common()
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); in kvm_arch_setup_async_pf()
3423 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || in can_do_async_pf()
3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); in try_async_pf()
3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); in try_async_pf()
3490 mmu_seq = vcpu->kvm->mmu_notifier_seq; in tdp_page_fault()
3499 spin_lock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3500 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in tdp_page_fault()
3507 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3512 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3548 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3558 mark_mmio_spte(kvm, sptep, gfn, access); in sync_mmio_spte()
4020 ++vcpu->kvm->stat.mmu_pde_zapped; in mmu_pte_write_new_pte()
4024 ++vcpu->kvm->stat.mmu_pte_updated; in mmu_pte_write_new_pte()
4048 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_pte_write_flush_tlb()
4068 r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8); in mmu_pte_write_fetch_gpte()
4185 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4201 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4202 ++vcpu->kvm->stat.mmu_pte_write; in kvm_mmu_pte_write()
4205 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
4208 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in kvm_mmu_pte_write()
4210 ++vcpu->kvm->stat.mmu_flooded; in kvm_mmu_pte_write()
4221 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4232 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_pte_write()
4234 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4247 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); in kvm_mmu_unprotect_page_virt()
4257 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) in make_mmu_pages_available()
4260 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { in make_mmu_pages_available()
4261 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
4264 ++vcpu->kvm->stat.mmu_recycled; in make_mmu_pages_available()
4266 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
4378 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_remove_write_access() argument
4387 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
4399 flush |= __rmap_write_protect(kvm, rmapp, in kvm_mmu_slot_remove_write_access()
4402 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) in kvm_mmu_slot_remove_write_access()
4403 cond_resched_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
4407 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
4414 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_remove_write_access()
4428 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_remove_write_access()
4431 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, in kvm_mmu_zap_collapsible_spte() argument
4456 drop_spte(kvm, sptep); in kvm_mmu_zap_collapsible_spte()
4466 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_mmu_zap_collapsible_sptes() argument
4473 spin_lock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
4481 flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp); in kvm_mmu_zap_collapsible_sptes()
4483 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { in kvm_mmu_zap_collapsible_sptes()
4485 kvm_flush_remote_tlbs(kvm); in kvm_mmu_zap_collapsible_sptes()
4488 cond_resched_lock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
4493 kvm_flush_remote_tlbs(kvm); in kvm_mmu_zap_collapsible_sptes()
4495 spin_unlock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
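
kvm_mmu_zap_collapsible_sptes() above shares the lock-break pattern of the slot-wide walkers below (cond_resched_lock() when rescheduling is needed or mmu_lock is contended), and additionally flushes the accumulated TLB state before yielding so no stale translation survives the window where the lock is dropped. The schematic below shows only that ordering; lock(), flush_tlbs() and should_yield() are stubs, not kernel APIs.

/* Schematic of the "flush pending state before dropping the lock" pattern. */
#include <stdbool.h>
#include <stdio.h>

static void lock(void)          { }
static void unlock(void)        { }
static void flush_tlbs(void)    { puts("  flush remote TLBs"); }
static bool should_yield(int i) { return i % 3 == 2; }  /* pretend contention */

static bool process_one(int i)
{
	printf("  zap entry %d\n", i);
	return true;                 /* pretend we changed a mapping */
}

static void walk_slot(int nr_entries)
{
	bool flush = false;

	lock();
	for (int i = 0; i < nr_entries; i++) {
		flush |= process_one(i);

		if (should_yield(i)) {
			/* Flush before yielding so other CPUs never run on
			 * translations that were already torn down. */
			if (flush) {
				flush_tlbs();
				flush = false;
			}
			unlock();        /* stands in for cond_resched_lock() */
			lock();
		}
	}
	if (flush)
		flush_tlbs();
	unlock();
}

int main(void)
{
	walk_slot(7);
	return 0;
}
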
4498 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, in kvm_mmu_slot_leaf_clear_dirty() argument
4508 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
4516 flush |= __rmap_clear_dirty(kvm, rmapp); in kvm_mmu_slot_leaf_clear_dirty()
4518 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) in kvm_mmu_slot_leaf_clear_dirty()
4519 cond_resched_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
4522 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
4524 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_leaf_clear_dirty()
4533 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_leaf_clear_dirty()
4537 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_largepage_remove_write_access() argument
4546 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
4558 flush |= __rmap_write_protect(kvm, rmapp, in kvm_mmu_slot_largepage_remove_write_access()
4561 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) in kvm_mmu_slot_largepage_remove_write_access()
4562 cond_resched_lock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
4565 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
4568 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_largepage_remove_write_access()
4571 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_largepage_remove_write_access()
4575 void kvm_mmu_slot_set_dirty(struct kvm *kvm, in kvm_mmu_slot_set_dirty() argument
4584 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
4596 flush |= __rmap_set_dirty(kvm, rmapp); in kvm_mmu_slot_set_dirty()
4598 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) in kvm_mmu_slot_set_dirty()
4599 cond_resched_lock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
4603 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
4605 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_set_dirty()
4609 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_set_dirty()
4614 static void kvm_zap_obsolete_pages(struct kvm *kvm) in kvm_zap_obsolete_pages() argument
4621 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
4628 if (!is_obsolete_sp(kvm, sp)) in kvm_zap_obsolete_pages()
4644 cond_resched_lock(&kvm->mmu_lock)) { in kvm_zap_obsolete_pages()
4649 ret = kvm_mmu_prepare_zap_page(kvm, sp, in kvm_zap_obsolete_pages()
4650 &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4661 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4673 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm) in kvm_mmu_invalidate_zap_all_pages() argument
4675 spin_lock(&kvm->mmu_lock); in kvm_mmu_invalidate_zap_all_pages()
4676 trace_kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_mmu_invalidate_zap_all_pages()
4677 kvm->arch.mmu_valid_gen++; in kvm_mmu_invalidate_zap_all_pages()
4688 kvm_reload_remote_mmus(kvm); in kvm_mmu_invalidate_zap_all_pages()
4690 kvm_zap_obsolete_pages(kvm); in kvm_mmu_invalidate_zap_all_pages()
4691 spin_unlock(&kvm->mmu_lock); in kvm_mmu_invalidate_zap_all_pages()
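
kvm_mmu_invalidate_zap_all_pages() above avoids an eager walk of every shadow page: bumping kvm->arch.mmu_valid_gen makes is_obsolete_sp() true for everything created earlier, remote MMUs are asked to reload, and kvm_zap_obsolete_pages() then reclaims the stale pages. A toy version of that generation-bump invalidation, with structures invented for illustration:

/* Sketch: invalidate everything by bumping a generation counter,
 * then lazily reclaim entries whose generation is stale. */
#include <stdio.h>
#include <stdbool.h>

#define NR_PAGES 4

struct shadow_page { unsigned long valid_gen; bool in_use; };

struct mmu {
	unsigned long valid_gen;
	struct shadow_page pages[NR_PAGES];
};

static bool is_obsolete(struct mmu *m, struct shadow_page *sp)
{
	return sp->valid_gen != m->valid_gen;
}

static void alloc_page(struct mmu *m, int i)
{
	m->pages[i].in_use = true;
	m->pages[i].valid_gen = m->valid_gen;   /* stamp at creation time */
}

static void invalidate_all(struct mmu *m)
{
	m->valid_gen++;     /* every existing page is now obsolete */
}

static int zap_obsolete(struct mmu *m)
{
	int zapped = 0;

	for (int i = 0; i < NR_PAGES; i++)
		if (m->pages[i].in_use && is_obsolete(m, &m->pages[i])) {
			m->pages[i].in_use = false;
			zapped++;
		}
	return zapped;
}

int main(void)
{
	struct mmu m = { 0 };

	alloc_page(&m, 0);
	alloc_page(&m, 1);
	invalidate_all(&m);
	alloc_page(&m, 2);                       /* created after the bump: kept */
	printf("zapped %d obsolete pages\n", zap_obsolete(&m));  /* 2 */
	return 0;
}
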
4694 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) in kvm_has_zapped_obsolete_pages() argument
4696 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); in kvm_has_zapped_obsolete_pages()
4699 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm) in kvm_mmu_invalidate_mmio_sptes() argument
4705 if (unlikely(kvm_current_mmio_generation(kvm) == 0)) { in kvm_mmu_invalidate_mmio_sptes()
4707 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_mmu_invalidate_mmio_sptes()
4714 struct kvm *kvm; in mmu_shrink_scan() local
4720 list_for_each_entry(kvm, &vm_list, vm_list) { in mmu_shrink_scan()
4738 if (!kvm->arch.n_used_mmu_pages && in mmu_shrink_scan()
4739 !kvm_has_zapped_obsolete_pages(kvm)) in mmu_shrink_scan()
4742 idx = srcu_read_lock(&kvm->srcu); in mmu_shrink_scan()
4743 spin_lock(&kvm->mmu_lock); in mmu_shrink_scan()
4745 if (kvm_has_zapped_obsolete_pages(kvm)) { in mmu_shrink_scan()
4746 kvm_mmu_commit_zap_page(kvm, in mmu_shrink_scan()
4747 &kvm->arch.zapped_obsolete_pages); in mmu_shrink_scan()
4751 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in mmu_shrink_scan()
4753 kvm_mmu_commit_zap_page(kvm, &invalid_list); in mmu_shrink_scan()
4756 spin_unlock(&kvm->mmu_lock); in mmu_shrink_scan()
4757 srcu_read_unlock(&kvm->srcu, idx); in mmu_shrink_scan()
4764 list_move_tail(&kvm->vm_list, &vm_list); in mmu_shrink_scan()
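
mmu_shrink_scan() above skips VMs that have nothing to reclaim and, after freeing from one VM, rotates it to the tail of vm_list so later shrink passes spread the reclaim across guests. The sketch below reproduces that skip-and-rotate behaviour with an array instead of the kernel's list_head machinery:

/* Sketch: scan a list of VMs for reclaim, then rotate the chosen VM to
 * the tail so the next shrink attempt picks on someone else. */
#include <stdio.h>
#include <stdbool.h>

#define NR_VMS 3

struct vm { const char *name; int used_pages; };

static bool reclaim_one(struct vm *vm)
{
	if (vm->used_pages == 0)
		return false;        /* nothing to free: skip this VM */
	vm->used_pages--;
	printf("reclaimed a page from %s\n", vm->name);
	return true;
}

static void rotate_to_tail(struct vm *list[], int idx)
{
	struct vm *picked = list[idx];

	for (int i = idx; i < NR_VMS - 1; i++)
		list[i] = list[i + 1];
	list[NR_VMS - 1] = picked;
}

static void shrink_scan(struct vm *list[])
{
	for (int i = 0; i < NR_VMS; i++) {
		if (!reclaim_one(list[i]))
			continue;    /* empty VM: try the next one */
		rotate_to_tail(list, i);
		break;               /* one reclaim per scan in this sketch */
	}
}

int main(void)
{
	struct vm a = { "vm-a", 2 }, b = { "vm-b", 1 }, c = { "vm-c", 0 };
	struct vm *list[NR_VMS] = { &a, &b, &c };

	shrink_scan(list);           /* frees from vm-a, moves it to the tail */
	shrink_scan(list);           /* now frees from vm-b */
	printf("order now: %s %s %s\n",
	       list[0]->name, list[1]->name, list[2]->name);
	return 0;
}
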
4821 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) in kvm_mmu_calculate_mmu_pages() argument
4828 slots = kvm_memslots(kvm); in kvm_mmu_calculate_mmu_pages()