Lines matching refs:kvm — cross-references to the kvm identifier in the x86 KVM MMU code (apparently arch/x86/kvm/mmu.c). Each entry gives the source line number, the matching code line, the enclosing function, and whether kvm appears there as an argument or a local.
784 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed() argument
793 slots = kvm_memslots_for_spte_role(kvm, sp->role); in account_shadowed()
799 kvm->arch.indirect_shadow_pages++; in account_shadowed()
802 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed() argument
811 slots = kvm_memslots_for_spte_role(kvm, sp->role); in unaccount_shadowed()
818 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
842 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level() argument
847 page_size = kvm_host_page_size(kvm, gfn); in host_mapping_level()
897 host_level = host_mapping_level(vcpu->kvm, large_gfn); in mapping_level()
1047 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp) in gfn_to_rmap() argument
1052 slots = kvm_memslots_for_spte_role(kvm, sp->role); in gfn_to_rmap()
1072 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_add()
1076 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
1084 rmapp = gfn_to_rmap(kvm, gfn, sp); in rmap_remove()
1154 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1157 rmap_remove(kvm, sptep); in drop_spte()
1161 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) in __drop_large_spte() argument
1166 drop_spte(kvm, sptep); in __drop_large_spte()
1167 --kvm->stat.lpages; in __drop_large_spte()
1176 if (__drop_large_spte(vcpu->kvm, sptep)) in drop_large_spte()
1177 kvm_flush_remote_tlbs(vcpu->kvm); in drop_large_spte()
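
The rmap_add()/rmap_remove()/drop_spte() entries above all go through gfn_to_rmap(), which resolves a guest frame number to a per-slot reverse-map head so that every shadow PTE mapping a given gfn can be found without walking the page tables. Below is a minimal userspace model of that reverse map; the list layout and names here (rmap_head, rmap_entry) are illustrative stand-ins, not the kernel's pte_list_desc packing.

/*
 * Minimal model of the gfn -> spte reverse map used by the functions
 * listed above.  A plain singly-linked list stands in for the kernel's
 * packed descriptors, so only the add/remove/drop flow is shown.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rmap_entry {
	uint64_t *sptep;		/* pointer to one shadow PTE */
	struct rmap_entry *next;
};

struct rmap_head {			/* one per (memslot, gfn, level) */
	struct rmap_entry *first;
};

static void rmap_add(struct rmap_head *head, uint64_t *sptep)
{
	struct rmap_entry *e = malloc(sizeof(*e));

	e->sptep = sptep;
	e->next = head->first;
	head->first = e;
}

static void rmap_remove(struct rmap_head *head, uint64_t *sptep)
{
	struct rmap_entry **p = &head->first;

	while (*p) {
		if ((*p)->sptep == sptep) {
			struct rmap_entry *victim = *p;

			*p = victim->next;
			free(victim);
			return;
		}
		p = &(*p)->next;
	}
}

/* drop_spte(): clear the PTE itself, then unhook it from the rmap. */
static void drop_spte(struct rmap_head *head, uint64_t *sptep)
{
	*sptep = 0;
	rmap_remove(head, sptep);
}

int main(void)
{
	uint64_t spte[2] = { 0x1000 | 7, 0x2000 | 7 };
	struct rmap_head head = { NULL };

	rmap_add(&head, &spte[0]);
	rmap_add(&head, &spte[1]);
	drop_spte(&head, &spte[0]);
	printf("spte[0]=%#llx, remaining entries head=%p\n",
	       (unsigned long long)spte[0], (void *)head.first);
	return 0;
}
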
1193 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) in spte_write_protect() argument
1210 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, in __rmap_write_protect() argument
1218 flush |= spte_write_protect(kvm, sptep, pt_protect); in __rmap_write_protect()
1223 static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep) in spte_clear_dirty() argument
1234 static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_clear_dirty() argument
1241 flush |= spte_clear_dirty(kvm, sptep); in __rmap_clear_dirty()
1246 static bool spte_set_dirty(struct kvm *kvm, u64 *sptep) in spte_set_dirty() argument
1257 static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp) in __rmap_set_dirty() argument
1264 flush |= spte_set_dirty(kvm, sptep); in __rmap_set_dirty()
1279 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked() argument
1288 __rmap_write_protect(kvm, rmapp, false); in kvm_mmu_write_protect_pt_masked()
1304 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_mmu_clear_dirty_pt_masked() argument
1313 __rmap_clear_dirty(kvm, rmapp); in kvm_mmu_clear_dirty_pt_masked()
1331 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
1336 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset, in kvm_arch_mmu_enable_log_dirty_pt_masked()
1339 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1353 write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true); in rmap_write_protect()
1359 static bool kvm_zap_rmapp(struct kvm *kvm, unsigned long *rmapp) in kvm_zap_rmapp() argument
1369 drop_spte(kvm, sptep); in kvm_zap_rmapp()
1376 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_unmap_rmapp() argument
1380 return kvm_zap_rmapp(kvm, rmapp); in kvm_unmap_rmapp()
1383 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_set_pte_rmapp() argument
1405 drop_spte(kvm, sptep); in kvm_set_pte_rmapp()
1421 kvm_flush_remote_tlbs(kvm); in kvm_set_pte_rmapp()
1494 static int kvm_handle_hva_range(struct kvm *kvm, in kvm_handle_hva_range() argument
1498 int (*handler)(struct kvm *kvm, in kvm_handle_hva_range() argument
1512 slots = __kvm_memslots(kvm, i); in kvm_handle_hva_range()
1533 ret |= handler(kvm, iterator.rmap, memslot, in kvm_handle_hva_range()
1541 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, in kvm_handle_hva() argument
1543 int (*handler)(struct kvm *kvm, unsigned long *rmapp, in kvm_handle_hva() argument
1548 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); in kvm_handle_hva()
1551 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) in kvm_unmap_hva() argument
1553 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); in kvm_unmap_hva()
1556 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_unmap_hva_range() argument
1558 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); in kvm_unmap_hva_range()
1561 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) in kvm_set_spte_hva() argument
1563 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); in kvm_set_spte_hva()
1566 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_age_rmapp() argument
1587 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, in kvm_test_age_rmapp() argument
1621 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_recycle()
1623 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1624 kvm_flush_remote_tlbs(vcpu->kvm); in rmap_recycle()
1627 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva() argument
1644 kvm->mmu_notifier_seq++; in kvm_age_hva()
1645 return kvm_handle_hva_range(kvm, start, end, 0, in kvm_age_hva()
1649 return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); in kvm_age_hva()
1652 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) in kvm_test_age_hva() argument
1654 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); in kvm_test_age_hva()
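
kvm_unmap_hva(), kvm_unmap_hva_range(), kvm_set_spte_hva(), kvm_age_hva() and kvm_test_age_hva() above are thin wrappers that pass different per-rmap callbacks into kvm_handle_hva_range(), which walks the memslots overlapping the host-virtual-address range and applies the callback to each rmap it finds. The sketch below models only that wrapper-plus-callback shape; the bucket structure and names (handle_range, unmap_handler) are invented for illustration, while the real walk iterates memslots and levels as the listing shows.

/*
 * Sketch of the wrapper-plus-callback dispatch visible above: each
 * notifier entry point hands a different handler to one range walker.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 4

struct bucket {				/* stand-in for one rmap head */
	int mapped;			/* number of sptes hanging off it */
	bool accessed;
};

typedef int (*range_handler)(struct bucket *b, unsigned long data);

static struct bucket buckets[NBUCKETS];

/* Walk every bucket that overlaps [start, end) and apply the handler. */
static int handle_range(unsigned long start, unsigned long end,
			unsigned long data, range_handler handler)
{
	unsigned long i;
	int ret = 0;

	for (i = start; i < end && i < NBUCKETS; i++)
		ret |= handler(&buckets[i], data);
	return ret;
}

static int unmap_handler(struct bucket *b, unsigned long data)
{
	int had = b->mapped;

	b->mapped = 0;			/* drop every spte in the bucket */
	return had != 0;		/* "flush needed" if we zapped any */
}

static int age_handler(struct bucket *b, unsigned long data)
{
	bool young = b->accessed;

	b->accessed = false;		/* clear the accessed state */
	return young;
}

/* These wrappers mirror kvm_unmap_hva_range()/kvm_age_hva() above. */
static int unmap_range(unsigned long start, unsigned long end)
{
	return handle_range(start, end, 0, unmap_handler);
}

static int age_range(unsigned long start, unsigned long end)
{
	return handle_range(start, end, 0, age_handler);
}

int main(void)
{
	buckets[1].mapped = 3;
	buckets[1].accessed = true;
	printf("aged young=%d, unmap flush=%d\n",
	       age_range(0, NBUCKETS), unmap_range(0, NBUCKETS));
	return 0;
}
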
1679 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) in kvm_mod_used_mmu_pages() argument
1681 kvm->arch.n_used_mmu_pages += nr; in kvm_mod_used_mmu_pages()
1739 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1742 kvm_mod_used_mmu_pages(vcpu->kvm, +1); in kvm_mmu_alloc_page()
1863 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page() argument
1868 --kvm->stat.mmu_unsync; in kvm_unlink_unsync_page()
1871 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1873 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1900 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1905 kvm_unlink_unsync_page(vcpu->kvm, sp); in __kvm_sync_page()
1908 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1924 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_page_transient()
1949 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
1954 kvm_unlink_unsync_page(vcpu->kvm, s); in kvm_sync_pages()
1957 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); in kvm_sync_pages()
1963 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_sync_pages()
2044 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_sync_children()
2050 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_sync_children()
2051 cond_resched_lock(&vcpu->kvm->mmu_lock); in mmu_sync_children()
2076 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp() argument
2078 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2106 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2107 if (is_obsolete_sp(vcpu->kvm, sp)) in kvm_mmu_get_page()
2130 ++vcpu->kvm->stat.mmu_cache_miss; in kvm_mmu_get_page()
2137 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2140 kvm_flush_remote_tlbs(vcpu->kvm); in kvm_mmu_get_page()
2144 account_shadowed(vcpu->kvm, sp); in kvm_mmu_get_page()
2146 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2235 kvm_flush_remote_tlbs(vcpu->kvm); in validate_direct_spte()
2239 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte() argument
2248 drop_spte(kvm, spte); in mmu_page_zap_pte()
2250 --kvm->stat.lpages; in mmu_page_zap_pte()
2264 static void kvm_mmu_page_unlink_children(struct kvm *kvm, in kvm_mmu_page_unlink_children() argument
2270 mmu_page_zap_pte(kvm, sp, sp->spt + i); in kvm_mmu_page_unlink_children()
2278 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents() argument
2287 static int mmu_zap_unsync_children(struct kvm *kvm, in mmu_zap_unsync_children() argument
2303 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2313 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page() argument
2319 ++kvm->stat.mmu_shadow_zapped; in kvm_mmu_prepare_zap_page()
2320 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); in kvm_mmu_prepare_zap_page()
2321 kvm_mmu_page_unlink_children(kvm, sp); in kvm_mmu_prepare_zap_page()
2322 kvm_mmu_unlink_parents(kvm, sp); in kvm_mmu_prepare_zap_page()
2325 unaccount_shadowed(kvm, sp); in kvm_mmu_prepare_zap_page()
2328 kvm_unlink_unsync_page(kvm, sp); in kvm_mmu_prepare_zap_page()
2333 kvm_mod_used_mmu_pages(kvm, -1); in kvm_mmu_prepare_zap_page()
2335 list_move(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_prepare_zap_page()
2341 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp)) in kvm_mmu_prepare_zap_page()
2342 kvm_reload_remote_mmus(kvm); in kvm_mmu_prepare_zap_page()
2349 static void kvm_mmu_commit_zap_page(struct kvm *kvm, in kvm_mmu_commit_zap_page() argument
2367 kvm_flush_remote_tlbs(kvm); in kvm_mmu_commit_zap_page()
2375 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, in prepare_zap_oldest_mmu_page() argument
2380 if (list_empty(&kvm->arch.active_mmu_pages)) in prepare_zap_oldest_mmu_page()
2383 sp = list_entry(kvm->arch.active_mmu_pages.prev, in prepare_zap_oldest_mmu_page()
2385 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in prepare_zap_oldest_mmu_page()
2394 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages() argument
2398 spin_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2400 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2402 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
2403 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in kvm_mmu_change_mmu_pages()
2406 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_change_mmu_pages()
2407 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2410 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
2412 spin_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2415 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page() argument
2423 spin_lock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2424 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2428 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2430 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
2431 spin_unlock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
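
The kvm_mmu_prepare_zap_page()/kvm_mmu_commit_zap_page() pairs above form a two-phase teardown: while mmu_lock is held, pages are unlinked and collected on a local invalid_list, and only the commit step pays for a single remote TLB flush before freeing the whole batch. A small self-contained model of that batching idea follows; the shadow_page structure and lists are simplified stand-ins, not the kernel's.

/*
 * Model of the prepare/commit zap pattern: unlink pages onto a local
 * invalid list first, then flush once and free the whole batch.
 */
#include <stdio.h>
#include <stdlib.h>

struct shadow_page {
	int gfn;
	struct shadow_page *next;	/* link on active or invalid list */
};

static struct shadow_page *active_list;
static int remote_tlb_flushes;

static void flush_remote_tlbs(void)	/* expensive: do it once per batch */
{
	remote_tlb_flushes++;
}

/* Phase 1: unlink from the active list, park on the caller's invalid list. */
static void prepare_zap_page(struct shadow_page *sp,
			     struct shadow_page **invalid_list)
{
	struct shadow_page **p = &active_list;

	while (*p && *p != sp)
		p = &(*p)->next;
	if (*p)
		*p = sp->next;
	sp->next = *invalid_list;
	*invalid_list = sp;
}

/* Phase 2: one flush for the whole batch, then free everything. */
static void commit_zap_page(struct shadow_page **invalid_list)
{
	struct shadow_page *sp, *next;

	if (!*invalid_list)
		return;
	flush_remote_tlbs();
	for (sp = *invalid_list; sp; sp = next) {
		next = sp->next;
		free(sp);
	}
	*invalid_list = NULL;
}

int main(void)
{
	struct shadow_page *invalid_list = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct shadow_page *sp = malloc(sizeof(*sp));

		sp->gfn = i;
		sp->next = active_list;
		active_list = sp;
	}
	/* Zap all three pages but pay for a single remote TLB flush. */
	while (active_list)
		prepare_zap_page(active_list, &invalid_list);
	commit_zap_page(&invalid_list);
	printf("remote TLB flushes: %d\n", remote_tlb_flushes);
	return 0;
}
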
2440 ++vcpu->kvm->stat.mmu_unsync; in __kvm_unsync_page()
2450 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2464 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2562 kvm_flush_remote_tlbs(vcpu->kvm); in set_spte()
2590 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2594 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2595 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_set_spte()
2616 ++vcpu->kvm->stat.lpages; in mmu_set_spte()
3006 mmu_seq = vcpu->kvm->mmu_notifier_seq; in nonpaging_map()
3015 spin_lock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3016 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in nonpaging_map()
3023 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3029 spin_unlock(&vcpu->kvm->mmu_lock); in nonpaging_map()
3049 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3053 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in mmu_free_roots()
3054 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3056 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3061 spin_lock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3070 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in mmu_free_roots()
3075 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in mmu_free_roots()
3076 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_free_roots()
3084 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { in mmu_check_root()
3098 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3103 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3110 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3118 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_direct_roots()
3149 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3155 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3183 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3190 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_shadow_roots()
3266 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3268 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3519 mmu_seq = vcpu->kvm->mmu_notifier_seq; in tdp_page_fault()
3528 spin_lock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3529 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in tdp_page_fault()
3536 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
3541 spin_unlock(&vcpu->kvm->mmu_lock); in tdp_page_fault()
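
nonpaging_map() and tdp_page_fault() above share the same unlocked-then-recheck pattern: sample mmu_notifier_seq, resolve the host page outside mmu_lock, then retake the lock and bail out via mmu_notifier_retry() if an invalidation ran in between. The sketch below models that sequence recheck with plain variables; notifier_retry() and map_page() here only loosely mirror the listing and are not the kernel implementation.

/*
 * Model of the mmu_notifier_seq recheck used by the fault paths above:
 * sample the sequence before the slow, unlocked work, then verify it
 * under the lock and retry if an invalidation slipped in.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long notifier_seq;	/* bumped by invalidations */
static bool in_progress;		/* an invalidation is running */

static bool notifier_retry(unsigned long seq)
{
	/* Retry if an invalidation is active or completed since "seq". */
	return in_progress || notifier_seq != seq;
}

static void invalidate_range(void)	/* what an mmu notifier would do */
{
	in_progress = true;
	/* ... zap the affected sptes ... */
	in_progress = false;
	notifier_seq++;
}

static int map_page(bool race)
{
	unsigned long seq = notifier_seq;	/* 1: sample */

	/* 2: slow path outside the lock: pin the host page, etc. */
	if (race)
		invalidate_range();

	/* 3: under mmu_lock, recheck before installing the spte. */
	if (notifier_retry(seq))
		return -1;			/* caller re-faults */
	return 0;				/* spte installed */
}

int main(void)
{
	printf("clean fault: %d, raced fault: %d\n",
	       map_page(false), map_page(true));
	return 0;
}
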
4140 ++vcpu->kvm->stat.mmu_pde_zapped; in mmu_pte_write_new_pte()
4144 ++vcpu->kvm->stat.mmu_pte_updated; in mmu_pte_write_new_pte()
4168 kvm_flush_remote_tlbs(vcpu->kvm); in mmu_pte_write_flush_tlb()
4306 if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_pte_write()
4322 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4323 ++vcpu->kvm->stat.mmu_pte_write; in kvm_mmu_pte_write()
4326 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
4329 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in kvm_mmu_pte_write()
4331 ++vcpu->kvm->stat.mmu_flooded; in kvm_mmu_pte_write()
4342 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4353 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_pte_write()
4355 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
4368 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); in kvm_mmu_unprotect_page_virt()
4378 if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) in make_mmu_pages_available()
4381 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { in make_mmu_pages_available()
4382 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
4385 ++vcpu->kvm->stat.mmu_recycled; in make_mmu_pages_available()
4387 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
4500 typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
4504 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, in slot_handle_level_range() argument
4514 flush |= fn(kvm, iterator.rmap); in slot_handle_level_range()
4516 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { in slot_handle_level_range()
4518 kvm_flush_remote_tlbs(kvm); in slot_handle_level_range()
4521 cond_resched_lock(&kvm->mmu_lock); in slot_handle_level_range()
4526 kvm_flush_remote_tlbs(kvm); in slot_handle_level_range()
4534 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, in slot_handle_level() argument
4538 return slot_handle_level_range(kvm, memslot, fn, start_level, in slot_handle_level()
4545 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, in slot_handle_all_level() argument
4548 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, in slot_handle_all_level()
4553 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, in slot_handle_large_level() argument
4556 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1, in slot_handle_large_level()
4561 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, in slot_handle_leaf() argument
4564 return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, in slot_handle_leaf()
4568 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_zap_gfn_range() argument
4574 spin_lock(&kvm->mmu_lock); in kvm_zap_gfn_range()
4576 slots = __kvm_memslots(kvm, i); in kvm_zap_gfn_range()
4585 slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, in kvm_zap_gfn_range()
4591 spin_unlock(&kvm->mmu_lock); in kvm_zap_gfn_range()
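
slot_handle_all_level(), slot_handle_large_level() and slot_handle_leaf() above are thin specializations of slot_handle_level_range(): one walk visits a memslot's rmaps between a start and an end page-table level, ORs together each handler's "flush needed" result, and (in the kernel) drops mmu_lock at reschedule points, flushing first if anything is pending. The level-bounded walk is modeled below with an invented slot layout; only the control flow follows the listing.

/*
 * Model of the slot_handle_level_range() walk: apply a handler to every
 * rmap bucket of a slot between two page-table levels and report whether
 * a TLB flush is needed.  The slot layout here is made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1
#define MAX_LEVEL		3
#define BUCKETS_PER_LEVEL	4

struct slot {
	int writable[MAX_LEVEL + 1][BUCKETS_PER_LEVEL];
};

typedef bool (*slot_level_handler)(int *bucket);

static bool handle_level_range(struct slot *slot, slot_level_handler fn,
			       int start_level, int end_level)
{
	bool flush = false;
	int level, i;

	for (level = start_level; level <= end_level; level++)
		for (i = 0; i < BUCKETS_PER_LEVEL; i++)
			flush |= fn(&slot->writable[level][i]);
	return flush;
}

/* Mirrors slot_rmap_write_protect(): strip write access, report changes. */
static bool write_protect_bucket(int *bucket)
{
	bool had_writable = *bucket != 0;

	*bucket = 0;
	return had_writable;
}

static bool handle_all_level(struct slot *slot, slot_level_handler fn)
{
	return handle_level_range(slot, fn, PT_PAGE_TABLE_LEVEL, MAX_LEVEL);
}

static bool handle_large_level(struct slot *slot, slot_level_handler fn)
{
	return handle_level_range(slot, fn, PT_PAGE_TABLE_LEVEL + 1, MAX_LEVEL);
}

int main(void)
{
	struct slot slot = { .writable = { [2][1] = 1 } };
	bool large, all;

	large = handle_large_level(&slot, write_protect_bucket);
	all = handle_all_level(&slot, write_protect_bucket);
	printf("large levels flush=%d, all levels flush=%d\n", large, all);
	return 0;
}
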
4594 static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp) in slot_rmap_write_protect() argument
4596 return __rmap_write_protect(kvm, rmapp, false); in slot_rmap_write_protect()
4599 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_remove_write_access() argument
4604 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
4605 flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect, in kvm_mmu_slot_remove_write_access()
4607 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
4614 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_remove_write_access()
4628 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_remove_write_access()
4631 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, in kvm_mmu_zap_collapsible_spte() argument
4655 drop_spte(kvm, sptep); in kvm_mmu_zap_collapsible_spte()
4664 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_mmu_zap_collapsible_sptes() argument
4668 spin_lock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
4669 slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, in kvm_mmu_zap_collapsible_sptes()
4671 spin_unlock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
4674 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, in kvm_mmu_slot_leaf_clear_dirty() argument
4679 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
4680 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); in kvm_mmu_slot_leaf_clear_dirty()
4681 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
4683 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_leaf_clear_dirty()
4692 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_leaf_clear_dirty()
4696 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_largepage_remove_write_access() argument
4701 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
4702 flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, in kvm_mmu_slot_largepage_remove_write_access()
4704 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
4707 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_largepage_remove_write_access()
4710 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_largepage_remove_write_access()
4714 void kvm_mmu_slot_set_dirty(struct kvm *kvm, in kvm_mmu_slot_set_dirty() argument
4719 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
4720 flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); in kvm_mmu_slot_set_dirty()
4721 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
4723 lockdep_assert_held(&kvm->slots_lock); in kvm_mmu_slot_set_dirty()
4727 kvm_flush_remote_tlbs(kvm); in kvm_mmu_slot_set_dirty()
4732 static void kvm_zap_obsolete_pages(struct kvm *kvm) in kvm_zap_obsolete_pages() argument
4739 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
4746 if (!is_obsolete_sp(kvm, sp)) in kvm_zap_obsolete_pages()
4762 cond_resched_lock(&kvm->mmu_lock)) { in kvm_zap_obsolete_pages()
4767 ret = kvm_mmu_prepare_zap_page(kvm, sp, in kvm_zap_obsolete_pages()
4768 &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4779 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
4791 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm) in kvm_mmu_invalidate_zap_all_pages() argument
4793 spin_lock(&kvm->mmu_lock); in kvm_mmu_invalidate_zap_all_pages()
4794 trace_kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_mmu_invalidate_zap_all_pages()
4795 kvm->arch.mmu_valid_gen++; in kvm_mmu_invalidate_zap_all_pages()
4806 kvm_reload_remote_mmus(kvm); in kvm_mmu_invalidate_zap_all_pages()
4808 kvm_zap_obsolete_pages(kvm); in kvm_mmu_invalidate_zap_all_pages()
4809 spin_unlock(&kvm->mmu_lock); in kvm_mmu_invalidate_zap_all_pages()
4812 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) in kvm_has_zapped_obsolete_pages() argument
4814 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); in kvm_has_zapped_obsolete_pages()
4817 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots) in kvm_mmu_invalidate_mmio_sptes() argument
4825 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_mmu_invalidate_mmio_sptes()
4832 struct kvm *kvm; in mmu_shrink_scan() local
4838 list_for_each_entry(kvm, &vm_list, vm_list) { in mmu_shrink_scan()
4856 if (!kvm->arch.n_used_mmu_pages && in mmu_shrink_scan()
4857 !kvm_has_zapped_obsolete_pages(kvm)) in mmu_shrink_scan()
4860 idx = srcu_read_lock(&kvm->srcu); in mmu_shrink_scan()
4861 spin_lock(&kvm->mmu_lock); in mmu_shrink_scan()
4863 if (kvm_has_zapped_obsolete_pages(kvm)) { in mmu_shrink_scan()
4864 kvm_mmu_commit_zap_page(kvm, in mmu_shrink_scan()
4865 &kvm->arch.zapped_obsolete_pages); in mmu_shrink_scan()
4869 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in mmu_shrink_scan()
4871 kvm_mmu_commit_zap_page(kvm, &invalid_list); in mmu_shrink_scan()
4874 spin_unlock(&kvm->mmu_lock); in mmu_shrink_scan()
4875 srcu_read_unlock(&kvm->srcu, idx); in mmu_shrink_scan()
4882 list_move_tail(&kvm->vm_list, &vm_list); in mmu_shrink_scan()
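
mmu_shrink_scan() above walks vm_list under memory pressure, skips VMs that have neither used MMU pages nor parked zapped-obsolete pages, reclaims from the first eligible VM, and rotates that VM to the list tail so later scans spread the load. The compact model below keeps only that skip-reclaim-rotate selection logic; the per-VM bookkeeping is invented.

/*
 * Model of the shrinker scan above: skip VMs with nothing to reclaim,
 * zap one page from the first eligible VM, then rotate it to the tail
 * so pressure is spread across VMs on later scans.
 */
#include <stdio.h>

#define NR_VMS 3

struct vm {
	const char *name;
	int n_used_mmu_pages;
};

static struct vm *vm_list[NR_VMS];

static void rotate_to_tail(int idx)
{
	struct vm *vm = vm_list[idx];
	int i;

	for (i = idx; i < NR_VMS - 1; i++)
		vm_list[i] = vm_list[i + 1];
	vm_list[NR_VMS - 1] = vm;
}

/* Reclaim one page; returns the number of pages freed (0 or 1). */
static int shrink_scan(void)
{
	int i;

	for (i = 0; i < NR_VMS; i++) {
		struct vm *vm = vm_list[i];

		if (!vm->n_used_mmu_pages)
			continue;	/* nothing to reclaim here */

		vm->n_used_mmu_pages--;	/* "zap oldest page" stand-in */
		rotate_to_tail(i);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct vm vms[NR_VMS] = {
		{ "idle", 0 }, { "busy-a", 2 }, { "busy-b", 2 },
	};
	int i;

	for (i = 0; i < NR_VMS; i++)
		vm_list[i] = &vms[i];

	while (shrink_scan())
		printf("reclaimed from %s\n", vm_list[NR_VMS - 1]->name);
	return 0;
}
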
4939 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) in kvm_mmu_calculate_mmu_pages() argument
4948 slots = __kvm_memslots(kvm, i); in kvm_mmu_calculate_mmu_pages()