Searched refs:sptep (Results 1 - 4 of 4) sorted by relevance

/linux-4.4.14/arch/x86/kvm/
mmu_audit.c
35 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
96 static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) audit_mappings() argument
103 sp = page_header(__pa(sptep)); audit_mappings()
113 if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level)) audit_mappings()
116 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); audit_mappings()
123 if ((*sptep & PT64_BASE_ADDR_MASK) != hpa) audit_mappings()
126 hpa, *sptep); audit_mappings()
129 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) inspect_spte_has_rmap() argument
138 rev_sp = page_header(__pa(sptep)); inspect_spte_has_rmap()
139 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); inspect_spte_has_rmap()
148 (long int)(sptep - rev_sp->spt), rev_sp->gfn); inspect_spte_has_rmap()
158 *sptep); inspect_spte_has_rmap()
163 static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level) audit_sptes_have_rmaps() argument
165 if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level)) audit_sptes_have_rmaps()
166 inspect_spte_has_rmap(vcpu->kvm, sptep); audit_sptes_have_rmaps()
169 static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level) audit_spte_after_sync() argument
171 struct kvm_mmu_page *sp = page_header(__pa(sptep)); audit_spte_after_sync()
196 u64 *sptep; audit_write_protection() local
208 for_each_rmap_spte(rmapp, &iter, sptep) audit_write_protection()
209 if (is_writable_pte(*sptep)) audit_write_protection()
226 static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level) audit_spte() argument
228 audit_sptes_have_rmaps(vcpu, sptep, level); audit_spte()
229 audit_mappings(vcpu, sptep, level); audit_spte()
230 audit_spte_after_sync(vcpu, sptep, level); audit_spte()
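Every match in mmu_audit.c flows through the inspect_spte_fn type declared at line 35: audit_spte() simply fans one sptep out to each audit pass. As a hedged illustration of that callback shape, a sketch of an extra pass (audit_spte_nonzero() and its message are hypothetical, not part of the file):

    static void audit_spte_nonzero(struct kvm_vcpu *vcpu, u64 *sptep, int level)
    {
            /* Only present, last-level SPTEs map a frame worth checking. */
            if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                    return;

            if (!(*sptep & PT64_BASE_ADDR_MASK))
                    pr_err("audit: leaf spte %p maps no frame\n", sptep);
    }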
mmutrace.h
202 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
203 TP_ARGS(sptep, gfn, access, gen),
206 __field(void *, sptep)
213 __entry->sptep = sptep;
219 TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
250 u64 *sptep, u64 old_spte, bool retry),
251 TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
257 __field(u64 *, sptep)
267 __entry->sptep = sptep;
269 __entry->new_spte = *sptep;
273 TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
276 kvm_mmu_trace_pferr_flags), __entry->sptep,
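Pieced together, the first group of fragments (lines 202-219) has the shape of a standard TRACE_EVENT, and the trace_mark_mmio_spte() call in mmu.c (line 240 below) names it. A hedged reconstruction, with the remaining fields inferred from the visible TP_printk() format string:

    TRACE_EVENT(mark_mmio_spte,
            TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
            TP_ARGS(sptep, gfn, access, gen),

            TP_STRUCT__entry(
                    __field(void *, sptep)
                    __field(gfn_t, gfn)
                    __field(unsigned, access)
                    __field(unsigned int, gen)
            ),

            TP_fast_assign(
                    __entry->sptep = sptep;
                    __entry->gfn = gfn;
                    __entry->access = access;
                    __entry->gen = gen;
            ),

            TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                      __entry->gfn, __entry->access, __entry->gen)
    );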
mmu.c
151 u64 *sptep; member in struct:kvm_shadow_walk_iterator
164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 static void mmu_spte_set(u64 *sptep, u64 spte);
231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, mark_mmio_spte() argument
240 trace_mark_mmio_spte(sptep, gfn, access, gen); mark_mmio_spte()
241 mmu_spte_set(sptep, mask); mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, set_mmio_spte() argument
265 mark_mmio_spte(vcpu, sptep, gfn, access); set_mmio_spte()
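mark_mmio_spte() caches an emulated-MMIO translation in the SPTE itself rather than in a rmap-backed mapping; is_mmio_spte() and get_mmio_spte_gfn() (see sync_mmio_spte(), line 3577 below) decode it again. A small helper in the same vein (spte_is_stale_mmio() is hypothetical):

    static bool spte_is_stale_mmio(u64 *sptep, gfn_t gfn)
    {
            /* An MMIO SPTE is stale once it no longer matches the gfn. */
            return is_mmio_spte(*sptep) && get_mmio_spte_gfn(*sptep) != gfn;
    }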
341 static void __set_spte(u64 *sptep, u64 spte) __set_spte() argument
343 *sptep = spte; __set_spte()
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte) __update_clear_spte_fast() argument
348 *sptep = spte; __update_clear_spte_fast()
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) __update_clear_spte_slow() argument
353 return xchg(sptep, spte); __update_clear_spte_slow()
356 static u64 __get_spte_lockless(u64 *sptep) __get_spte_lockless() argument
358 return ACCESS_ONCE(*sptep); __get_spte_lockless()
369 static void count_spte_clear(u64 *sptep, u64 spte) count_spte_clear() argument
371 struct kvm_mmu_page *sp = page_header(__pa(sptep)); count_spte_clear()
381 static void __set_spte(u64 *sptep, u64 spte) __set_spte() argument
385 ssptep = (union split_spte *)sptep; __set_spte()
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte) __update_clear_spte_fast() argument
404 ssptep = (union split_spte *)sptep; __update_clear_spte_fast()
416 count_spte_clear(sptep, spte); __update_clear_spte_fast()
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) __update_clear_spte_slow() argument
423 ssptep = (union split_spte *)sptep; __update_clear_spte_slow()
430 count_spte_clear(sptep, spte); __update_clear_spte_slow()
453 static u64 __get_spte_lockless(u64 *sptep) __get_spte_lockless() argument
455 struct kvm_mmu_page *sp = page_header(__pa(sptep)); __get_spte_lockless()
456 union split_spte spte, *orig = (union split_spte *)sptep; __get_spte_lockless()
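The casts above are the 32-bit variants of the same accessors: a 64-bit SPTE cannot be read or written atomically on a 32-bit host, so it is handled as two 32-bit halves. A sketch of the union the casts imply (the exact definition lives earlier in mmu.c; this is an assumption from the usage shown):

    union split_spte {
            struct {
                    u32 spte_low;
                    u32 spte_high;
            };
            u64 spte;
    };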
518 * Set the sptep from nonpresent to present.
519 * Note: the sptep being assigned *must* be either not present
523 static void mmu_spte_set(u64 *sptep, u64 new_spte) mmu_spte_set() argument
525 WARN_ON(is_shadow_present_pte(*sptep)); mmu_spte_set()
526 __set_spte(sptep, new_spte); mmu_spte_set()
538 static bool mmu_spte_update(u64 *sptep, u64 new_spte) mmu_spte_update() argument
540 u64 old_spte = *sptep; mmu_spte_update()
546 mmu_spte_set(sptep, new_spte); mmu_spte_update()
551 __update_clear_spte_fast(sptep, new_spte); mmu_spte_update()
553 old_spte = __update_clear_spte_slow(sptep, new_spte); mmu_spte_update()
585 * It sets the sptep from present to nonpresent, and tracks the
586 * state bits; it is used to clear the last-level sptep.
588 static int mmu_spte_clear_track_bits(u64 *sptep) mmu_spte_clear_track_bits() argument
591 u64 old_spte = *sptep; mmu_spte_clear_track_bits()
594 __update_clear_spte_fast(sptep, 0ull); mmu_spte_clear_track_bits()
596 old_spte = __update_clear_spte_slow(sptep, 0ull); mmu_spte_clear_track_bits()
619 * Directly clear spte without caring about the state bits of sptep,
622 static void mmu_spte_clear_no_track(u64 *sptep) mmu_spte_clear_no_track() argument
624 __update_clear_spte_fast(sptep, 0ull); mmu_spte_clear_no_track()
627 static u64 mmu_spte_get_lockless(u64 *sptep) mmu_spte_get_lockless() argument
629 return __get_spte_lockless(sptep); mmu_spte_get_lockless()
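Taken together, lines 518-629 form a small SPTE state machine: mmu_spte_set() for nonpresent->present, mmu_spte_update() for present->present (its return value signals that a TLB flush is needed), and mmu_spte_clear_track_bits() for present->nonpresent. A hedged usage sketch (spte_lifecycle_demo() is hypothetical; real callers also handle accessed/dirty bookkeeping):

    static void spte_lifecycle_demo(struct kvm *kvm, u64 *sptep, u64 first, u64 next)
    {
            mmu_spte_set(sptep, first);             /* nonpresent -> present */

            if (mmu_spte_update(sptep, next))       /* present -> present */
                    kvm_flush_remote_tlbs(kvm);     /* true means: flush */

            if (mmu_spte_clear_track_bits(sptep))   /* present -> nonpresent */
                    rmap_remove(kvm, sptep);        /* as drop_spte() does above */
    }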
1094 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1095 int pos; /* index of the sptep */
1103 * Returns sptep if found, NULL otherwise.
1123 * Returns sptep if found, NULL otherwise.
1129 u64 *sptep; rmap_get_next() local
1132 sptep = iter->desc->sptes[iter->pos]; rmap_get_next()
1133 if (sptep) rmap_get_next()
1134 return sptep; rmap_get_next()
1154 static void drop_spte(struct kvm *kvm, u64 *sptep) drop_spte() argument
1156 if (mmu_spte_clear_track_bits(sptep)) drop_spte()
1157 rmap_remove(kvm, sptep); drop_spte()
1161 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) __drop_large_spte() argument
1163 if (is_large_pte(*sptep)) { __drop_large_spte()
1164 WARN_ON(page_header(__pa(sptep))->role.level == __drop_large_spte()
1166 drop_spte(kvm, sptep); __drop_large_spte()
1174 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) drop_large_spte() argument
1176 if (__drop_large_spte(vcpu->kvm, sptep)) drop_large_spte()
1181 * Write-protect on the specified @sptep, @pt_protect indicates whether
1193 static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) spte_write_protect() argument
1195 u64 spte = *sptep; spte_write_protect()
1201 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); spte_write_protect()
1207 return mmu_spte_update(sptep, spte); spte_write_protect()
1213 u64 *sptep; __rmap_write_protect() local
1217 for_each_rmap_spte(rmapp, &iter, sptep) __rmap_write_protect()
1218 flush |= spte_write_protect(kvm, sptep, pt_protect); __rmap_write_protect()
1223 static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep) spte_clear_dirty() argument
1225 u64 spte = *sptep; spte_clear_dirty()
1227 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); spte_clear_dirty()
1231 return mmu_spte_update(sptep, spte); spte_clear_dirty()
1236 u64 *sptep; __rmap_clear_dirty() local
1240 for_each_rmap_spte(rmapp, &iter, sptep) __rmap_clear_dirty()
1241 flush |= spte_clear_dirty(kvm, sptep); __rmap_clear_dirty()
1246 static bool spte_set_dirty(struct kvm *kvm, u64 *sptep) spte_set_dirty() argument
1248 u64 spte = *sptep; spte_set_dirty()
1250 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); spte_set_dirty()
1254 return mmu_spte_update(sptep, spte); spte_set_dirty()
1259 u64 *sptep; __rmap_set_dirty() local
1263 for_each_rmap_spte(rmapp, &iter, sptep) __rmap_set_dirty()
1264 flush |= spte_set_dirty(kvm, sptep); __rmap_set_dirty()
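spte_write_protect(), spte_clear_dirty() and spte_set_dirty() are all driven by the same for_each_rmap_spte() walk over one gfn's reverse mapping. The pattern in isolation, as a sketch (count_writable_sptes() is hypothetical):

    static int count_writable_sptes(unsigned long *rmapp)
    {
            struct rmap_iterator iter;
            u64 *sptep;
            int count = 0;

            for_each_rmap_spte(rmapp, &iter, sptep)
                    if (is_writable_pte(*sptep))
                            count++;

            return count;
    }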
1361 u64 *sptep; kvm_zap_rmapp() local
1365 while ((sptep = rmap_get_first(*rmapp, &iter))) { kvm_zap_rmapp()
1366 BUG_ON(!(*sptep & PT_PRESENT_MASK)); kvm_zap_rmapp()
1367 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); kvm_zap_rmapp()
1369 drop_spte(kvm, sptep); kvm_zap_rmapp()
1387 u64 *sptep; kvm_set_pte_rmapp() local
1398 for_each_rmap_spte(rmapp, &iter, sptep) { kvm_set_pte_rmapp()
1400 sptep, *sptep, gfn, level); kvm_set_pte_rmapp()
1405 drop_spte(kvm, sptep); kvm_set_pte_rmapp()
1408 new_spte = *sptep & ~PT64_BASE_ADDR_MASK; kvm_set_pte_rmapp()
1415 mmu_spte_clear_track_bits(sptep); kvm_set_pte_rmapp()
1416 mmu_spte_set(sptep, new_spte); kvm_set_pte_rmapp()
1570 u64 *sptep; kvm_age_rmapp() local
1576 for_each_rmap_spte(rmapp, &iter, sptep) kvm_age_rmapp()
1577 if (*sptep & shadow_accessed_mask) { kvm_age_rmapp()
1580 (unsigned long *)sptep); kvm_age_rmapp()
1591 u64 *sptep; kvm_test_age_rmapp() local
1603 for_each_rmap_spte(rmapp, &iter, sptep) kvm_test_age_rmapp()
1604 if (*sptep & shadow_accessed_mask) { kvm_test_age_rmapp()
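Both aging helpers test the hardware-managed accessed bit through shadow_accessed_mask, and kvm_age_rmapp() clears it in place on the sptep (lines 1577-1580). That test-and-clear as a standalone sketch (clear_accessed() is hypothetical):

    static bool clear_accessed(u64 *sptep)
    {
            if (!(*sptep & shadow_accessed_mask))
                    return false;

            /* Same idiom as kvm_age_rmapp(): clear only the accessed bit. */
            clear_bit(ffs(shadow_accessed_mask) - 1, (unsigned long *)sptep);
            return true;
    }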
2180 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; shadow_walk_okay()
2198 return __shadow_walk_next(iterator, *iterator->sptep); shadow_walk_next()
2201 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed) link_shadow_page() argument
2214 mmu_spte_set(sptep, spte); link_shadow_page()
2217 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, validate_direct_spte() argument
2220 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { validate_direct_spte()
2230 child = page_header(*sptep & PT64_BASE_ADDR_MASK); validate_direct_spte()
2234 drop_parent_pte(child, sptep); validate_direct_spte()
2280 u64 *sptep; kvm_mmu_unlink_parents() local
2283 while ((sptep = rmap_get_first(sp->parent_ptes, &iter))) kvm_mmu_unlink_parents()
2284 drop_parent_pte(sp, sptep); kvm_mmu_unlink_parents()
2487 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, set_spte() argument
2495 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) set_spte()
2543 if (!can_unsync && is_writable_pte(*sptep)) set_spte()
2561 if (mmu_spte_update(sptep, spte)) set_spte()
2567 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, mmu_set_spte() argument
2576 *sptep, write_fault, gfn); mmu_set_spte()
2578 if (is_rmap_spte(*sptep)) { mmu_set_spte()
2584 !is_large_pte(*sptep)) { mmu_set_spte()
2586 u64 pte = *sptep; mmu_set_spte()
2589 drop_parent_pte(child, sptep); mmu_set_spte()
2591 } else if (pfn != spte_to_pfn(*sptep)) { mmu_set_spte()
2593 spte_to_pfn(*sptep), pfn); mmu_set_spte()
2594 drop_spte(vcpu->kvm, sptep); mmu_set_spte()
2600 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, mmu_set_spte()
2607 if (unlikely(is_mmio_spte(*sptep) && emulate)) mmu_set_spte()
2610 pgprintk("%s: setting spte %llx\n", __func__, *sptep); mmu_set_spte()
2612 is_large_pte(*sptep)? "2MB" : "4kB", mmu_set_spte()
2613 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, mmu_set_spte()
2614 *sptep, sptep); mmu_set_spte()
2615 if (!was_rmapped && is_large_pte(*sptep)) mmu_set_spte()
2618 if (is_shadow_present_pte(*sptep)) { mmu_set_spte()
2620 rmap_count = rmap_add(vcpu, sptep, gfn); mmu_set_spte()
2622 rmap_recycle(vcpu, sptep, gfn); mmu_set_spte()
2668 __direct_pte_prefetch(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep) __direct_pte_prefetch() argument
2676 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); __direct_pte_prefetch()
2680 if (is_shadow_present_pte(*spte) || spte == sptep) { __direct_pte_prefetch()
2691 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) direct_pte_prefetch() argument
2704 sp = page_header(__pa(sptep)); direct_pte_prefetch()
2708 __direct_pte_prefetch(vcpu, sp, sptep); direct_pte_prefetch()
2725 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, __direct_map()
2728 direct_pte_prefetch(vcpu, iterator.sptep); __direct_map()
2733 drop_large_spte(vcpu, iterator.sptep); __direct_map()
2734 if (!is_shadow_present_pte(*iterator.sptep)) { __direct_map()
2741 1, ACC_ALL, iterator.sptep); __direct_map()
2743 link_shadow_page(iterator.sptep, sp, true); __direct_map()
2863 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep, u64 spte) fast_pf_fix_direct_spte() argument
2874 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); fast_pf_fix_direct_spte()
2888 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) fast_pf_fix_direct_spte()
2927 sp = page_header(__pa(iterator.sptep));
2967 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
2969 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
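fast_pf_fix_direct_spte() (line 2863 above) repairs a write-protected SPTE without taking mmu_lock: the cmpxchg64 at line 2888 succeeds only if the SPTE still holds the value read earlier, so a concurrent zap just makes the fast path bail out. The core idea as a sketch (try_set_writable() is hypothetical):

    static bool try_set_writable(u64 *sptep, u64 old_spte)
    {
            /* Succeeds only if nobody changed the SPTE since we read it. */
            return cmpxchg64(sptep, old_spte, old_spte | PT_WRITABLE_MASK) == old_spte;
    }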
3318 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) walk_shadow_page_get_mmio_spte() argument
3334 spte = mmu_spte_get_lockless(iterator.sptep); walk_shadow_page_get_mmio_spte()
3358 *sptep = spte; walk_shadow_page_get_mmio_spte()
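walk_shadow_page_get_mmio_spte() drives the iterator member from line 151 through the lockless walk macro from line 164. The same pattern in isolation, as a hedged sketch (dump_shadow_walk() is hypothetical):

    static void dump_shadow_walk(struct kvm_vcpu *vcpu, u64 addr)
    {
            struct kvm_shadow_walk_iterator iterator;
            u64 spte;

            walk_shadow_page_lockless_begin(vcpu);
            for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                    pr_info("level %d sptep %p spte %#llx\n",
                            iterator.level, iterator.sptep, spte);
            walk_shadow_page_lockless_end(vcpu);
    }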
3577 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, sync_mmio_spte() argument
3580 if (unlikely(is_mmio_spte(*sptep))) { sync_mmio_spte()
3581 if (gfn != get_mmio_spte_gfn(*sptep)) { sync_mmio_spte()
3582 mmu_spte_clear_no_track(sptep); sync_mmio_spte()
3587 mark_mmio_spte(vcpu, sptep, gfn, access); sync_mmio_spte()
4634 u64 *sptep; kvm_mmu_zap_collapsible_spte() local
4641 for_each_rmap_spte(rmapp, &iter, sptep) { kvm_mmu_zap_collapsible_spte()
4642 sp = page_header(__pa(sptep)); kvm_mmu_zap_collapsible_spte()
4643 pfn = spte_to_pfn(*sptep); kvm_mmu_zap_collapsible_spte()
4655 drop_spte(kvm, sptep); kvm_mmu_zap_collapsible_spte()
paging_tmpl.h
515 pte_prefetch(struct kvm_vcpu *vcpu, struct guest_walker *gw, u64 *sptep) pte_prefetch() argument
523 sp = page_header(__pa(sptep)); pte_prefetch()
529 return __direct_pte_prefetch(vcpu, sp, sptep); pte_prefetch()
531 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); pte_prefetch()
535 if (spte == sptep) pte_prefetch()
583 clear_sp_write_flooding_count(it.sptep); fetch()
584 drop_large_spte(vcpu, it.sptep); fetch()
587 if (!is_shadow_present_pte(*it.sptep)) { fetch()
590 false, access, it.sptep); fetch()
601 link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK); fetch()
609 clear_sp_write_flooding_count(it.sptep); fetch()
610 validate_direct_spte(vcpu, it.sptep, direct_access); fetch()
612 drop_large_spte(vcpu, it.sptep); fetch()
614 if (is_shadow_present_pte(*it.sptep)) fetch()
620 true, direct_access, it.sptep); fetch()
621 link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK); fetch()
624 clear_sp_write_flooding_count(it.sptep); fetch()
625 mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate, fetch()
627 FNAME(pte_prefetch)(vcpu, gw, it.sptep); fetch()
633 kvm_mmu_put_page(sp, it.sptep); fetch()
824 u64 *sptep; invlpg() local
842 sptep = iterator.sptep; for_each_shadow_entry()
844 sp = page_header(__pa(sptep)); for_each_shadow_entry()
845 if (is_last_spte(*sptep, level)) { for_each_shadow_entry()
853 pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); for_each_shadow_entry()
855 if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) for_each_shadow_entry()
865 FNAME(update_pte)(vcpu, sp, sptep, &gpte); for_each_shadow_entry()
868 if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) for_each_shadow_entry()
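The invlpg() fragment at line 853 shows the arithmetic used throughout paging_tmpl.h: a shadow page mirrors one guest page table, so the offset of sptep within sp->spt is also the guest PTE's index. As a sketch (the wrapper is hypothetical; FNAME(get_level1_sp_gpa)() is assumed to return the guest table's base GPA):

    static gpa_t FNAME(sptep_to_guest_pte_gpa)(struct kvm_mmu_page *sp, u64 *sptep)
    {
            gpa_t pte_gpa = FNAME(get_level1_sp_gpa)(sp);

            /* Same index in the shadow table and in the guest table. */
            return pte_gpa + (sptep - sp->spt) * sizeof(pt_element_t);
    }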
