Lines Matching refs: sp

371 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in count_spte_clear() local
378 sp->clear_spte_count++; in count_spte_clear()
455 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in __get_spte_lockless() local
460 count = sp->clear_spte_count; in __get_spte_lockless()
470 count != sp->clear_spte_count)) in __get_spte_lockless()
754 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn() argument
756 if (!sp->role.direct) in kvm_mmu_page_get_gfn()
757 return sp->gfns[index]; in kvm_mmu_page_get_gfn()
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
764 if (sp->role.direct) in kvm_mmu_page_set_gfn()
765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); in kvm_mmu_page_set_gfn()
767 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
784 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed() argument
792 gfn = sp->gfn; in account_shadowed()
793 slots = kvm_memslots_for_spte_role(kvm, sp->role); in account_shadowed()
802 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed() argument
810 gfn = sp->gfn; in unaccount_shadowed()
811 slots = kvm_memslots_for_spte_role(kvm, sp->role); in unaccount_shadowed()
1047 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp) in gfn_to_rmap() argument
1052 slots = kvm_memslots_for_spte_role(kvm, sp->role); in gfn_to_rmap()
1054 return __gfn_to_rmap(gfn, sp->role.level, slot); in gfn_to_rmap()
1067 struct kvm_mmu_page *sp; in rmap_add() local
1070 sp = page_header(__pa(spte)); in rmap_add()
1071 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1072 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_add()
1078 struct kvm_mmu_page *sp; in rmap_remove() local
1082 sp = page_header(__pa(spte)); in rmap_remove()
1083 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1084 rmapp = gfn_to_rmap(kvm, gfn, sp); in rmap_remove()
1617 struct kvm_mmu_page *sp; in rmap_recycle() local
1619 sp = page_header(__pa(spte)); in rmap_recycle()
1621 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_recycle()
1623 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1685 static void kvm_mmu_free_page(struct kvm_mmu_page *sp) in kvm_mmu_free_page() argument
1687 MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); in kvm_mmu_free_page()
1688 hlist_del(&sp->hash_link); in kvm_mmu_free_page()
1689 list_del(&sp->link); in kvm_mmu_free_page()
1690 free_page((unsigned long)sp->spt); in kvm_mmu_free_page()
1691 if (!sp->role.direct) in kvm_mmu_free_page()
1692 free_page((unsigned long)sp->gfns); in kvm_mmu_free_page()
1693 kmem_cache_free(mmu_page_header_cache, sp); in kvm_mmu_free_page()
1702 struct kvm_mmu_page *sp, u64 *parent_pte) in mmu_page_add_parent_pte() argument
1707 pte_list_add(vcpu, parent_pte, &sp->parent_ptes); in mmu_page_add_parent_pte()
1710 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte() argument
1713 pte_list_remove(parent_pte, &sp->parent_ptes); in mmu_page_remove_parent_pte()
1716 static void drop_parent_pte(struct kvm_mmu_page *sp, in drop_parent_pte() argument
1719 mmu_page_remove_parent_pte(sp, parent_pte); in drop_parent_pte()
1726 struct kvm_mmu_page *sp; in kvm_mmu_alloc_page() local
1728 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); in kvm_mmu_alloc_page()
1729 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1731 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); in kvm_mmu_alloc_page()
1732 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); in kvm_mmu_alloc_page()
1739 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); in kvm_mmu_alloc_page()
1740 sp->parent_ptes = 0; in kvm_mmu_alloc_page()
1741 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_alloc_page()
1743 return sp; in kvm_mmu_alloc_page()
1747 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) in kvm_mmu_mark_parents_unsync() argument
1749 pte_list_walk(&sp->parent_ptes, mark_unsync); in kvm_mmu_mark_parents_unsync()
1754 struct kvm_mmu_page *sp; in mark_unsync() local
1757 sp = page_header(__pa(spte)); in mark_unsync()
1758 index = spte - sp->spt; in mark_unsync()
1759 if (__test_and_set_bit(index, sp->unsync_child_bitmap)) in mark_unsync()
1761 if (sp->unsync_children++) in mark_unsync()
1763 kvm_mmu_mark_parents_unsync(sp); in mark_unsync()
1767 struct kvm_mmu_page *sp) in nonpaging_sync_page() argument
1777 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte() argument
1787 struct kvm_mmu_page *sp; member
1793 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add() argument
1798 if (sp->unsync) in mmu_pages_add()
1800 if (pvec->page[i].sp == sp) in mmu_pages_add()
1803 pvec->page[pvec->nr].sp = sp; in mmu_pages_add()
1809 static int __mmu_unsync_walk(struct kvm_mmu_page *sp, in __mmu_unsync_walk() argument
1814 for_each_set_bit(i, sp->unsync_child_bitmap, 512) { in __mmu_unsync_walk()
1816 u64 ent = sp->spt[i]; in __mmu_unsync_walk()
1844 __clear_bit(i, sp->unsync_child_bitmap); in __mmu_unsync_walk()
1845 sp->unsync_children--; in __mmu_unsync_walk()
1846 WARN_ON((int)sp->unsync_children < 0); in __mmu_unsync_walk()
1853 static int mmu_unsync_walk(struct kvm_mmu_page *sp, in mmu_unsync_walk() argument
1856 if (!sp->unsync_children) in mmu_unsync_walk()
1859 mmu_pages_add(pvec, sp, 0); in mmu_unsync_walk()
1860 return __mmu_unsync_walk(sp, pvec); in mmu_unsync_walk()
1863 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page() argument
1865 WARN_ON(!sp->unsync); in kvm_unlink_unsync_page()
1866 trace_kvm_mmu_sync_page(sp); in kvm_unlink_unsync_page()
1867 sp->unsync = 0; in kvm_unlink_unsync_page()
1871 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1896 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __kvm_sync_page() argument
1899 if (sp->role.cr4_pae != !!is_pae(vcpu)) { in __kvm_sync_page()
1900 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1905 kvm_unlink_unsync_page(vcpu->kvm, sp); in __kvm_sync_page()
1907 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1908 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1917 struct kvm_mmu_page *sp) in kvm_sync_page_transient() argument
1922 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); in kvm_sync_page_transient()
1936 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page() argument
1939 return __kvm_sync_page(vcpu, sp, invalid_list, true); in kvm_sync_page()
1973 #define for_each_sp(pvec, sp, parents, i) \ argument
1975 sp = pvec.page[i].sp; \
1976 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1986 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next() local
1988 if (sp->role.level == PT_PAGE_TABLE_LEVEL) { in mmu_pages_next()
1993 parents->parent[sp->role.level-2] = sp; in mmu_pages_next()
1994 parents->idx[sp->role.level-1] = pvec->page[n].idx; in mmu_pages_next()
2002 struct kvm_mmu_page *sp; in mmu_pages_clear_parents() local
2008 sp = parents->parent[level]; in mmu_pages_clear_parents()
2009 if (!sp) in mmu_pages_clear_parents()
2012 --sp->unsync_children; in mmu_pages_clear_parents()
2013 WARN_ON((int)sp->unsync_children < 0); in mmu_pages_clear_parents()
2014 __clear_bit(idx, sp->unsync_child_bitmap); in mmu_pages_clear_parents()
2016 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); in mmu_pages_clear_parents()
2031 struct kvm_mmu_page *sp; in mmu_sync_children() local
2040 for_each_sp(pages, sp, parents, i) in mmu_sync_children()
2041 protected |= rmap_write_protect(vcpu, sp->gfn); in mmu_sync_children()
2046 for_each_sp(pages, sp, parents, i) { in mmu_sync_children()
2047 kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2056 static void init_shadow_page_table(struct kvm_mmu_page *sp) in init_shadow_page_table() argument
2061 sp->spt[i] = 0ull; in init_shadow_page_table()
2064 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) in __clear_sp_write_flooding_count() argument
2066 sp->write_flooding_count = 0; in __clear_sp_write_flooding_count()
2071 struct kvm_mmu_page *sp = page_header(__pa(spte)); in clear_sp_write_flooding_count() local
2073 __clear_sp_write_flooding_count(sp); in clear_sp_write_flooding_count()
2076 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp() argument
2078 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2091 struct kvm_mmu_page *sp; in kvm_mmu_get_page() local
2106 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2107 if (is_obsolete_sp(vcpu->kvm, sp)) in kvm_mmu_get_page()
2110 if (!need_sync && sp->unsync) in kvm_mmu_get_page()
2113 if (sp->role.word != role.word) in kvm_mmu_get_page()
2116 if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) in kvm_mmu_get_page()
2119 mmu_page_add_parent_pte(vcpu, sp, parent_pte); in kvm_mmu_get_page()
2120 if (sp->unsync_children) { in kvm_mmu_get_page()
2122 kvm_mmu_mark_parents_unsync(sp); in kvm_mmu_get_page()
2123 } else if (sp->unsync) in kvm_mmu_get_page()
2124 kvm_mmu_mark_parents_unsync(sp); in kvm_mmu_get_page()
2126 __clear_sp_write_flooding_count(sp); in kvm_mmu_get_page()
2127 trace_kvm_mmu_get_page(sp, false); in kvm_mmu_get_page()
2128 return sp; in kvm_mmu_get_page()
2131 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); in kvm_mmu_get_page()
2132 if (!sp) in kvm_mmu_get_page()
2133 return sp; in kvm_mmu_get_page()
2134 sp->gfn = gfn; in kvm_mmu_get_page()
2135 sp->role = role; in kvm_mmu_get_page()
2136 hlist_add_head(&sp->hash_link, in kvm_mmu_get_page()
2144 account_shadowed(vcpu->kvm, sp); in kvm_mmu_get_page()
2146 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; in kvm_mmu_get_page()
2147 init_shadow_page_table(sp); in kvm_mmu_get_page()
2148 trace_kvm_mmu_get_page(sp, true); in kvm_mmu_get_page()
2149 return sp; in kvm_mmu_get_page()
2201 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed) in link_shadow_page() argument
2208 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | in link_shadow_page()
2239 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte() argument
2247 if (is_last_spte(pte, sp->role.level)) { in mmu_page_zap_pte()
2265 struct kvm_mmu_page *sp) in kvm_mmu_page_unlink_children() argument
2270 mmu_page_zap_pte(kvm, sp, sp->spt + i); in kvm_mmu_page_unlink_children()
2273 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) in kvm_mmu_put_page() argument
2275 mmu_page_remove_parent_pte(sp, parent_pte); in kvm_mmu_put_page()
2278 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents() argument
2283 while ((sptep = rmap_get_first(sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2284 drop_parent_pte(sp, sptep); in kvm_mmu_unlink_parents()
2300 struct kvm_mmu_page *sp; in mmu_zap_unsync_children() local
2302 for_each_sp(pages, sp, parents, i) { in mmu_zap_unsync_children()
2303 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2313 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page() argument
2318 trace_kvm_mmu_prepare_zap_page(sp); in kvm_mmu_prepare_zap_page()
2320 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); in kvm_mmu_prepare_zap_page()
2321 kvm_mmu_page_unlink_children(kvm, sp); in kvm_mmu_prepare_zap_page()
2322 kvm_mmu_unlink_parents(kvm, sp); in kvm_mmu_prepare_zap_page()
2324 if (!sp->role.invalid && !sp->role.direct) in kvm_mmu_prepare_zap_page()
2325 unaccount_shadowed(kvm, sp); in kvm_mmu_prepare_zap_page()
2327 if (sp->unsync) in kvm_mmu_prepare_zap_page()
2328 kvm_unlink_unsync_page(kvm, sp); in kvm_mmu_prepare_zap_page()
2329 if (!sp->root_count) { in kvm_mmu_prepare_zap_page()
2332 list_move(&sp->link, invalid_list); in kvm_mmu_prepare_zap_page()
2335 list_move(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_prepare_zap_page()
2341 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp)) in kvm_mmu_prepare_zap_page()
2345 sp->role.invalid = 1; in kvm_mmu_prepare_zap_page()
2352 struct kvm_mmu_page *sp, *nsp; in kvm_mmu_commit_zap_page() local
2369 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
2370 WARN_ON(!sp->role.invalid || sp->root_count); in kvm_mmu_commit_zap_page()
2371 kvm_mmu_free_page(sp); in kvm_mmu_commit_zap_page()
2378 struct kvm_mmu_page *sp; in prepare_zap_oldest_mmu_page() local
2383 sp = list_entry(kvm->arch.active_mmu_pages.prev, in prepare_zap_oldest_mmu_page()
2385 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in prepare_zap_oldest_mmu_page()
2417 struct kvm_mmu_page *sp; in kvm_mmu_unprotect_page() local
2424 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2426 sp->role.word); in kvm_mmu_unprotect_page()
2428 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2437 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in __kvm_unsync_page() argument
2439 trace_kvm_mmu_unsync_page(sp); in __kvm_unsync_page()
2441 sp->unsync = 1; in __kvm_unsync_page()
2443 kvm_mmu_mark_parents_unsync(sp); in __kvm_unsync_page()
2642 struct kvm_mmu_page *sp, in direct_pte_prefetch_many() argument
2647 unsigned access = sp->role.access; in direct_pte_prefetch_many()
2651 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); in direct_pte_prefetch_many()
2662 sp->role.level, gfn, page_to_pfn(pages[i]), in direct_pte_prefetch_many()
2669 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch() argument
2674 WARN_ON(!sp->role.direct); in __direct_pte_prefetch()
2676 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
2677 spte = sp->spt + i; in __direct_pte_prefetch()
2683 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2693 struct kvm_mmu_page *sp; in direct_pte_prefetch() local
2704 sp = page_header(__pa(sptep)); in direct_pte_prefetch()
2705 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in direct_pte_prefetch()
2708 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
2716 struct kvm_mmu_page *sp; in __direct_map() local
2739 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, in __direct_map()
2743 link_shadow_page(iterator.sptep, sp, true); in __direct_map()
2863 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in fast_pf_fix_direct_spte() argument
2868 WARN_ON(!sp->role.direct); in fast_pf_fix_direct_spte()
2874 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
2903 struct kvm_mmu_page *sp; in fast_page_fault() local
2927 sp = page_header(__pa(iterator.sptep)); in fast_page_fault()
2928 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
2959 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in fast_page_fault()
2967 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
3038 struct kvm_mmu_page *sp; in mmu_free_roots() local
3050 sp = page_header(root); in mmu_free_roots()
3051 --sp->root_count; in mmu_free_roots()
3052 if (!sp->root_count && sp->role.invalid) { in mmu_free_roots()
3053 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in mmu_free_roots()
3067 sp = page_header(root); in mmu_free_roots()
3068 --sp->root_count; in mmu_free_roots()
3069 if (!sp->root_count && sp->role.invalid) in mmu_free_roots()
3070 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in mmu_free_roots()
3094 struct kvm_mmu_page *sp; in mmu_alloc_direct_roots() local
3100 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, in mmu_alloc_direct_roots()
3102 ++sp->root_count; in mmu_alloc_direct_roots()
3104 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3112 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), in mmu_alloc_direct_roots()
3116 root = __pa(sp->spt); in mmu_alloc_direct_roots()
3117 ++sp->root_count; in mmu_alloc_direct_roots()
3130 struct kvm_mmu_page *sp; in mmu_alloc_shadow_roots() local
3151 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, in mmu_alloc_shadow_roots()
3153 root = __pa(sp->spt); in mmu_alloc_shadow_roots()
3154 ++sp->root_count; in mmu_alloc_shadow_roots()
3185 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, in mmu_alloc_shadow_roots()
3188 root = __pa(sp->spt); in mmu_alloc_shadow_roots()
3189 ++sp->root_count; in mmu_alloc_shadow_roots()
3235 struct kvm_mmu_page *sp; in mmu_sync_roots() local
3247 sp = page_header(root); in mmu_sync_roots()
3248 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
3257 sp = page_header(root); in mmu_sync_roots()
3258 mmu_sync_children(vcpu, sp); in mmu_sync_roots()
4136 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte() argument
4139 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { in mmu_pte_write_new_pte()
4145 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4213 static bool detect_write_flooding(struct kvm_mmu_page *sp) in detect_write_flooding() argument
4219 if (sp->role.level == PT_PAGE_TABLE_LEVEL) in detect_write_flooding()
4222 return ++sp->write_flooding_count >= 3; in detect_write_flooding()
4229 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, in detect_write_misaligned() argument
4235 gpa, bytes, sp->role.word); in detect_write_misaligned()
4238 pte_size = sp->role.cr4_pae ? 8 : 4; in detect_write_misaligned()
4253 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) in get_written_sptes() argument
4260 level = sp->role.level; in get_written_sptes()
4262 if (!sp->role.cr4_pae) { in get_written_sptes()
4276 if (quadrant != sp->role.quadrant) in get_written_sptes()
4280 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
4288 struct kvm_mmu_page *sp; in kvm_mmu_pte_write() local
4326 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()
4327 if (detect_write_misaligned(sp, gpa, bytes) || in kvm_mmu_pte_write()
4328 detect_write_flooding(sp)) { in kvm_mmu_pte_write()
4329 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, in kvm_mmu_pte_write()
4335 spte = get_written_sptes(sp, gpa, &npte); in kvm_mmu_pte_write()
4342 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4344 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4346 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4638 struct kvm_mmu_page *sp; in kvm_mmu_zap_collapsible_spte() local
4642 sp = page_header(__pa(sptep)); in kvm_mmu_zap_collapsible_spte()
4652 if (sp->role.direct && in kvm_mmu_zap_collapsible_spte()
4734 struct kvm_mmu_page *sp, *node; in kvm_zap_obsolete_pages() local
4738 list_for_each_entry_safe_reverse(sp, node, in kvm_zap_obsolete_pages()
4746 if (!is_obsolete_sp(kvm, sp)) in kvm_zap_obsolete_pages()
4754 if (sp->role.invalid) in kvm_zap_obsolete_pages()
4767 ret = kvm_mmu_prepare_zap_page(kvm, sp, in kvm_zap_obsolete_pages()
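
For reference, the index-to-gfn arithmetic reported at line 759 above (kvm_mmu_page_get_gfn() for direct shadow pages) can be illustrated stand-alone. The following is a minimal userspace sketch, not kernel code: direct_gfn() and the example values are hypothetical, and PT64_LEVEL_BITS is restated here under the assumption that it is 9 (512 entries per page-table level), as in the kernel headers.

/*
 * Stand-alone sketch (assumption-labelled, not kernel code) of the
 * gfn computed for entry 'index' of a direct shadow page, mirroring
 * line 759 above: sp->gfn + (index << ((level - 1) * PT64_LEVEL_BITS)).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PT64_LEVEL_BITS 9   /* assumed: 512 entries per level */

/* gfn covered by entry 'index' of a direct sp at 'level' whose
 * first gfn is 'base_gfn' (hypothetical helper for illustration). */
static gfn_t direct_gfn(gfn_t base_gfn, int index, int level)
{
	return base_gfn + ((gfn_t)index << ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
	/* A level-2 direct sp starting at gfn 0x100000: entry 3 spans
	 * 512 gfns per entry, so it maps gfn 0x100000 + 3*512 = 0x100600. */
	printf("0x%llx\n", (unsigned long long)direct_gfn(0x100000, 3, 2));
	return 0;
}

This matches the listing's behaviour for non-direct pages as well: when sp->role.direct is clear, line 757 returns the stored sp->gfns[index] instead of computing the value.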