Lines matching refs: sptep
151 u64 *sptep; member
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 static void mmu_spte_set(u64 *sptep, u64 spte);
231 static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
240 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
241 mmu_spte_set(sptep, mask); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(kvm, sptep, gfn, access); in set_mmio_spte()
341 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
343 *sptep = spte; in __set_spte()
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
348 *sptep = spte; in __update_clear_spte_fast()
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
353 return xchg(sptep, spte); in __update_clear_spte_slow()
356 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
358 return ACCESS_ONCE(*sptep); in __get_spte_lockless()
369 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
371 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in count_spte_clear()
381 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
385 ssptep = (union split_spte *)sptep; in __set_spte()
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
404 ssptep = (union split_spte *)sptep; in __update_clear_spte_fast()
416 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
423 ssptep = (union split_spte *)sptep; in __update_clear_spte_slow()
430 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
453 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
455 struct kvm_mmu_page *sp = page_header(__pa(sptep)); in __get_spte_lockless()
456 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless()
523 static void mmu_spte_set(u64 *sptep, u64 new_spte) in mmu_spte_set() argument
525 WARN_ON(is_shadow_present_pte(*sptep)); in mmu_spte_set()
526 __set_spte(sptep, new_spte); in mmu_spte_set()
538 static bool mmu_spte_update(u64 *sptep, u64 new_spte) in mmu_spte_update() argument
540 u64 old_spte = *sptep; in mmu_spte_update()
546 mmu_spte_set(sptep, new_spte); in mmu_spte_update()
551 __update_clear_spte_fast(sptep, new_spte); in mmu_spte_update()
553 old_spte = __update_clear_spte_slow(sptep, new_spte); in mmu_spte_update()
588 static int mmu_spte_clear_track_bits(u64 *sptep) in mmu_spte_clear_track_bits() argument
591 u64 old_spte = *sptep; in mmu_spte_clear_track_bits()
594 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_track_bits()
596 old_spte = __update_clear_spte_slow(sptep, 0ull); in mmu_spte_clear_track_bits()
622 static void mmu_spte_clear_no_track(u64 *sptep) in mmu_spte_clear_no_track() argument
624 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_no_track()
627 static u64 mmu_spte_get_lockless(u64 *sptep) in mmu_spte_get_lockless() argument
629 return __get_spte_lockless(sptep); in mmu_spte_get_lockless()
1102 u64 *sptep; in rmap_get_next() local
1105 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1106 if (sptep) in rmap_get_next()
1107 return sptep; in rmap_get_next()
1122 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1124 if (mmu_spte_clear_track_bits(sptep)) in drop_spte()
1125 rmap_remove(kvm, sptep); in drop_spte()
1129 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) in __drop_large_spte() argument
1131 if (is_large_pte(*sptep)) { in __drop_large_spte()
1132 WARN_ON(page_header(__pa(sptep))->role.level == in __drop_large_spte()
1134 drop_spte(kvm, sptep); in __drop_large_spte()
1142 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) in drop_large_spte() argument
1144 if (__drop_large_spte(vcpu->kvm, sptep)) in drop_large_spte()
1161 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) in spte_write_protect() argument
1163 u64 spte = *sptep; in spte_write_protect()
1169 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); in spte_write_protect()
1175 return mmu_spte_update(sptep, spte); in spte_write_protect()
1181 u64 *sptep; in __rmap_write_protect() local
1185 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_write_protect()
1186 BUG_ON(!(*sptep & PT_PRESENT_MASK)); in __rmap_write_protect()
1188 flush |= spte_write_protect(kvm, sptep, pt_protect); in __rmap_write_protect()
1189 sptep = rmap_get_next(&iter); in __rmap_write_protect()
1195 static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep) in spte_clear_dirty() argument
1197 u64 spte = *sptep; in spte_clear_dirty()
1199 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); in spte_clear_dirty()
1203 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1208 u64 *sptep; in __rmap_clear_dirty() local
1212 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_clear_dirty()
1213 BUG_ON(!(*sptep & PT_PRESENT_MASK)); in __rmap_clear_dirty()
1215 flush |= spte_clear_dirty(kvm, sptep); in __rmap_clear_dirty()
1216 sptep = rmap_get_next(&iter); in __rmap_clear_dirty()
1222 static bool spte_set_dirty(struct kvm *kvm, u64 *sptep) in spte_set_dirty() argument
1224 u64 spte = *sptep; in spte_set_dirty()
1226 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); in spte_set_dirty()
1230 return mmu_spte_update(sptep, spte); in spte_set_dirty()
1235 u64 *sptep; in __rmap_set_dirty() local
1239 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in __rmap_set_dirty()
1240 BUG_ON(!(*sptep & PT_PRESENT_MASK)); in __rmap_set_dirty()
1242 flush |= spte_set_dirty(kvm, sptep); in __rmap_set_dirty()
1243 sptep = rmap_get_next(&iter); in __rmap_set_dirty()
1344 u64 *sptep; in kvm_unmap_rmapp() local
1348 while ((sptep = rmap_get_first(*rmapp, &iter))) { in kvm_unmap_rmapp()
1349 BUG_ON(!(*sptep & PT_PRESENT_MASK)); in kvm_unmap_rmapp()
1351 sptep, *sptep, gfn, level); in kvm_unmap_rmapp()
1353 drop_spte(kvm, sptep); in kvm_unmap_rmapp()
1364 u64 *sptep; in kvm_set_pte_rmapp() local
1374 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in kvm_set_pte_rmapp()
1375 BUG_ON(!is_shadow_present_pte(*sptep)); in kvm_set_pte_rmapp()
1377 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1382 drop_spte(kvm, sptep); in kvm_set_pte_rmapp()
1383 sptep = rmap_get_first(*rmapp, &iter); in kvm_set_pte_rmapp()
1385 new_spte = *sptep & ~PT64_BASE_ADDR_MASK; in kvm_set_pte_rmapp()
1392 mmu_spte_clear_track_bits(sptep); in kvm_set_pte_rmapp()
1393 mmu_spte_set(sptep, new_spte); in kvm_set_pte_rmapp()
1394 sptep = rmap_get_next(&iter); in kvm_set_pte_rmapp()
1492 u64 *sptep; in kvm_age_rmapp() local
1498 for (sptep = rmap_get_first(*rmapp, &iter); sptep; in kvm_age_rmapp()
1499 sptep = rmap_get_next(&iter)) { in kvm_age_rmapp()
1500 BUG_ON(!is_shadow_present_pte(*sptep)); in kvm_age_rmapp()
1502 if (*sptep & shadow_accessed_mask) { in kvm_age_rmapp()
1505 (unsigned long *)sptep); in kvm_age_rmapp()
1516 u64 *sptep; in kvm_test_age_rmapp() local
1528 for (sptep = rmap_get_first(*rmapp, &iter); sptep; in kvm_test_age_rmapp()
1529 sptep = rmap_get_next(&iter)) { in kvm_test_age_rmapp()
1530 BUG_ON(!is_shadow_present_pte(*sptep)); in kvm_test_age_rmapp()
1532 if (*sptep & shadow_accessed_mask) { in kvm_test_age_rmapp()
2109 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; in shadow_walk_okay()
2127 return __shadow_walk_next(iterator, *iterator->sptep); in shadow_walk_next()
2130 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed) in link_shadow_page() argument
2143 mmu_spte_set(sptep, spte); in link_shadow_page()
2146 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2149 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { in validate_direct_spte()
2159 child = page_header(*sptep & PT64_BASE_ADDR_MASK); in validate_direct_spte()
2163 drop_parent_pte(child, sptep); in validate_direct_spte()
2209 u64 *sptep; in kvm_mmu_unlink_parents() local
2212 while ((sptep = rmap_get_first(sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2213 drop_parent_pte(sp, sptep); in kvm_mmu_unlink_parents()
2513 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in set_spte() argument
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) in set_spte()
2569 if (!can_unsync && is_writable_pte(*sptep)) in set_spte()
2587 if (mmu_spte_update(sptep, spte)) in set_spte()
2593 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in mmu_set_spte() argument
2602 *sptep, write_fault, gfn); in mmu_set_spte()
2604 if (is_rmap_spte(*sptep)) { in mmu_set_spte()
2610 !is_large_pte(*sptep)) { in mmu_set_spte()
2612 u64 pte = *sptep; in mmu_set_spte()
2615 drop_parent_pte(child, sptep); in mmu_set_spte()
2617 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
2619 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
2620 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2626 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2633 if (unlikely(is_mmio_spte(*sptep) && emulate)) in mmu_set_spte()
2636 pgprintk("%s: setting spte %llx\n", __func__, *sptep); in mmu_set_spte()
2638 is_large_pte(*sptep)? "2MB" : "4kB", in mmu_set_spte()
2639 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, in mmu_set_spte()
2640 *sptep, sptep); in mmu_set_spte()
2641 if (!was_rmapped && is_large_pte(*sptep)) in mmu_set_spte()
2644 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
2646 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2648 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2693 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch() argument
2700 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
2704 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2715 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
2728 sp = page_header(__pa(sptep)); in direct_pte_prefetch()
2732 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
2749 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, in __direct_map()
2752 direct_pte_prefetch(vcpu, iterator.sptep); in __direct_map()
2757 drop_large_spte(vcpu, iterator.sptep); in __direct_map()
2758 if (!is_shadow_present_pte(*iterator.sptep)) { in __direct_map()
2765 1, ACC_ALL, iterator.sptep); in __direct_map()
2767 link_shadow_page(iterator.sptep, sp, true); in __direct_map()
2888 u64 *sptep, u64 spte) in fast_pf_fix_direct_spte() argument
2898 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
2912 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) in fast_pf_fix_direct_spte()
2951 sp = page_header(__pa(iterator.sptep)); in fast_page_fault()
2991 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); in fast_page_fault()
2993 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
3548 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3551 if (unlikely(is_mmio_spte(*sptep))) { in sync_mmio_spte()
3552 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
3553 mmu_spte_clear_no_track(sptep); in sync_mmio_spte()
3558 mark_mmio_spte(kvm, sptep, gfn, access); in sync_mmio_spte()
4434 u64 *sptep; in kvm_mmu_zap_collapsible_spte() local
4440 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { in kvm_mmu_zap_collapsible_spte()
4441 BUG_ON(!(*sptep & PT_PRESENT_MASK)); in kvm_mmu_zap_collapsible_spte()
4443 sp = page_header(__pa(sptep)); in kvm_mmu_zap_collapsible_spte()
4444 pfn = spte_to_pfn(*sptep); in kvm_mmu_zap_collapsible_spte()
4456 drop_spte(kvm, sptep); in kvm_mmu_zap_collapsible_spte()
4457 sptep = rmap_get_first(*rmapp, &iter); in kvm_mmu_zap_collapsible_spte()
4460 sptep = rmap_get_next(&iter); in kvm_mmu_zap_collapsible_spte()