sptep            1150 arch/s390/include/asm/pgtable.h 		    pte_t *sptep, pte_t *tptep, pte_t pte);
sptep            2111 arch/s390/mm/gmap.c 	pte_t *sptep, *tptep;
sptep            2135 arch/s390/mm/gmap.c 		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
sptep            2136 arch/s390/mm/gmap.c 		if (sptep) {
sptep            2146 arch/s390/mm/gmap.c 			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
sptep             631 arch/s390/mm/pgtable.c 		    pte_t *sptep, pte_t *tptep, pte_t pte)
sptep             639 arch/s390/mm/pgtable.c 	spgste = pgste_get_lock(sptep);
sptep             640 arch/s390/mm/pgtable.c 	spte = *sptep;
sptep             653 arch/s390/mm/pgtable.c 	pgste_set_unlock(sptep, spgste);
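
The s390 hits above are all one path: gmap.c's shadow-page code uses gmap_pte_op_walk() to locate the source PTE (sptep) and then calls ptep_shadow_pte() in pgtable.c, which copies it into the target/shadow PTE (tptep) under the pgste locks. A minimal userspace sketch of that "derive a shadow PTE from a source PTE without widening access" step; the PTE layout (PTE_VALID, PTE_WRITE, PTE_FRAME) is invented for the example and is not the s390 format:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical PTE layout for illustration only (not the s390 bits). */
#define PTE_VALID  (1ull << 0)
#define PTE_WRITE  (1ull << 1)
#define PTE_FRAME  (~0xfffull)		/* page frame bits */

typedef uint64_t pte_t;

/*
 * Model of the sptep/tptep pairing: build the target (shadow) PTE from the
 * source PTE, keeping the frame but never granting more access than the
 * protection the caller asks for.
 */
static int shadow_pte(const pte_t *sptep, pte_t *tptep, int prot_write)
{
	pte_t spte = *sptep;

	if (!(spte & PTE_VALID))
		return -1;			/* source not present: fail */

	*tptep = spte & (PTE_FRAME | PTE_VALID);
	if (prot_write && (spte & PTE_WRITE))
		*tptep |= PTE_WRITE;		/* writable only if both sides allow it */
	return 0;
}

int main(void)
{
	pte_t src = 0x1000 | PTE_VALID | PTE_WRITE, dst = 0;

	if (!shadow_pte(&src, &dst, 0))
		printf("shadow pte: %#llx\n", (unsigned long long)dst);
	return 0;
}
```
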
sptep             213 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep             243 arch/x86/kvm/mmu.c 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
sptep             305 arch/x86/kvm/mmu.c static void mmu_spte_set(u64 *sptep, u64 spte);
sptep             456 arch/x86/kvm/mmu.c static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
sptep             469 arch/x86/kvm/mmu.c 	trace_mark_mmio_spte(sptep, gfn, access, gen);
sptep             470 arch/x86/kvm/mmu.c 	mmu_spte_set(sptep, mask);
sptep             488 arch/x86/kvm/mmu.c static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
sptep             492 arch/x86/kvm/mmu.c 		mark_mmio_spte(vcpu, sptep, gfn, access);
sptep             646 arch/x86/kvm/mmu.c static void __set_spte(u64 *sptep, u64 spte)
sptep             648 arch/x86/kvm/mmu.c 	WRITE_ONCE(*sptep, spte);
sptep             651 arch/x86/kvm/mmu.c static void __update_clear_spte_fast(u64 *sptep, u64 spte)
sptep             653 arch/x86/kvm/mmu.c 	WRITE_ONCE(*sptep, spte);
sptep             656 arch/x86/kvm/mmu.c static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
sptep             658 arch/x86/kvm/mmu.c 	return xchg(sptep, spte);
sptep             661 arch/x86/kvm/mmu.c static u64 __get_spte_lockless(u64 *sptep)
sptep             663 arch/x86/kvm/mmu.c 	return READ_ONCE(*sptep);
sptep             674 arch/x86/kvm/mmu.c static void count_spte_clear(u64 *sptep, u64 spte)
sptep             676 arch/x86/kvm/mmu.c 	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
sptep             686 arch/x86/kvm/mmu.c static void __set_spte(u64 *sptep, u64 spte)
sptep             690 arch/x86/kvm/mmu.c 	ssptep = (union split_spte *)sptep;
sptep             705 arch/x86/kvm/mmu.c static void __update_clear_spte_fast(u64 *sptep, u64 spte)
sptep             709 arch/x86/kvm/mmu.c 	ssptep = (union split_spte *)sptep;
sptep             721 arch/x86/kvm/mmu.c 	count_spte_clear(sptep, spte);
sptep             724 arch/x86/kvm/mmu.c static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
sptep             728 arch/x86/kvm/mmu.c 	ssptep = (union split_spte *)sptep;
sptep             735 arch/x86/kvm/mmu.c 	count_spte_clear(sptep, spte);
sptep             758 arch/x86/kvm/mmu.c static u64 __get_spte_lockless(u64 *sptep)
sptep             760 arch/x86/kvm/mmu.c 	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
sptep             761 arch/x86/kvm/mmu.c 	union split_spte spte, *orig = (union split_spte *)sptep;
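
The mmu.c hits between 646 and 761 are the two builds of the low-level SPTE accessors: on 64-bit hosts __set_spte()/__get_spte_lockless() are plain WRITE_ONCE/READ_ONCE/xchg, while the PAE variants split each 64-bit SPTE into two 32-bit halves (union split_spte) and order the half-writes so a lockless reader never pairs a present low half with a stale high half. A compilable sketch of that ordering idea, using C11 atomics in place of WRITE_ONCE/smp_wmb; the real lockless reader additionally retries on the shadow page's clear_spte_count (see count_spte_clear above):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical split SPTE: a 64-bit entry stored as two 32-bit halves. */
struct split_spte {
	_Atomic uint32_t low;	/* assume the present bit lives in this half */
	_Atomic uint32_t high;
};

static void set_spte(struct split_spte *s, uint64_t spte)
{
	/* nonpresent -> present: publish the high half before the low half */
	atomic_store(&s->high, (uint32_t)(spte >> 32));
	atomic_store(&s->low, (uint32_t)spte);
}

static void clear_spte(struct split_spte *s, uint64_t spte)
{
	/* present -> nonpresent: retract the low (present) half first */
	atomic_store(&s->low, (uint32_t)spte);
	atomic_store(&s->high, (uint32_t)(spte >> 32));
}

static uint64_t get_spte(struct split_spte *s)
{
	uint32_t lo, hi;

	do {				/* retry if the low half moved under us */
		lo = atomic_load(&s->low);
		hi = atomic_load(&s->high);
	} while (atomic_load(&s->low) != lo);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct split_spte s = { 0, 0 };

	set_spte(&s, 0x123456789abcd001ull);
	printf("spte = %#llx\n", (unsigned long long)get_spte(&s));
	clear_spte(&s, 0);
	printf("spte = %#llx\n", (unsigned long long)get_spte(&s));
	return 0;
}
```
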
sptep             833 arch/x86/kvm/mmu.c static void mmu_spte_set(u64 *sptep, u64 new_spte)
sptep             835 arch/x86/kvm/mmu.c 	WARN_ON(is_shadow_present_pte(*sptep));
sptep             836 arch/x86/kvm/mmu.c 	__set_spte(sptep, new_spte);
sptep             843 arch/x86/kvm/mmu.c static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
sptep             845 arch/x86/kvm/mmu.c 	u64 old_spte = *sptep;
sptep             850 arch/x86/kvm/mmu.c 		mmu_spte_set(sptep, new_spte);
sptep             855 arch/x86/kvm/mmu.c 		__update_clear_spte_fast(sptep, new_spte);
sptep             857 arch/x86/kvm/mmu.c 		old_spte = __update_clear_spte_slow(sptep, new_spte);
sptep             875 arch/x86/kvm/mmu.c static bool mmu_spte_update(u64 *sptep, u64 new_spte)
sptep             878 arch/x86/kvm/mmu.c 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
sptep             916 arch/x86/kvm/mmu.c static int mmu_spte_clear_track_bits(u64 *sptep)
sptep             919 arch/x86/kvm/mmu.c 	u64 old_spte = *sptep;
sptep             922 arch/x86/kvm/mmu.c 		__update_clear_spte_fast(sptep, 0ull);
sptep             924 arch/x86/kvm/mmu.c 		old_spte = __update_clear_spte_slow(sptep, 0ull);
sptep             952 arch/x86/kvm/mmu.c static void mmu_spte_clear_no_track(u64 *sptep)
sptep             954 arch/x86/kvm/mmu.c 	__update_clear_spte_fast(sptep, 0ull);
sptep             957 arch/x86/kvm/mmu.c static u64 mmu_spte_get_lockless(u64 *sptep)
sptep             959 arch/x86/kvm/mmu.c 	return __get_spte_lockless(sptep);
sptep            1009 arch/x86/kvm/mmu.c static bool mmu_spte_age(u64 *sptep)
sptep            1011 arch/x86/kvm/mmu.c 	u64 spte = mmu_spte_get_lockless(sptep);
sptep            1018 arch/x86/kvm/mmu.c 			  (unsigned long *)sptep);
sptep            1028 arch/x86/kvm/mmu.c 		mmu_spte_update_no_track(sptep, spte);
sptep            1463 arch/x86/kvm/mmu.c static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
sptep            1465 arch/x86/kvm/mmu.c 	mmu_spte_clear_track_bits(sptep);
sptep            1466 arch/x86/kvm/mmu.c 	__pte_list_remove(sptep, rmap_head);
sptep            1540 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1547 arch/x86/kvm/mmu.c 		sptep = (u64 *)rmap_head->val;
sptep            1553 arch/x86/kvm/mmu.c 	sptep = iter->desc->sptes[iter->pos];
sptep            1555 arch/x86/kvm/mmu.c 	BUG_ON(!is_shadow_present_pte(*sptep));
sptep            1556 arch/x86/kvm/mmu.c 	return sptep;
sptep            1566 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1571 arch/x86/kvm/mmu.c 			sptep = iter->desc->sptes[iter->pos];
sptep            1572 arch/x86/kvm/mmu.c 			if (sptep)
sptep            1581 arch/x86/kvm/mmu.c 			sptep = iter->desc->sptes[iter->pos];
sptep            1588 arch/x86/kvm/mmu.c 	BUG_ON(!is_shadow_present_pte(*sptep));
sptep            1589 arch/x86/kvm/mmu.c 	return sptep;
sptep            1596 arch/x86/kvm/mmu.c static void drop_spte(struct kvm *kvm, u64 *sptep)
sptep            1598 arch/x86/kvm/mmu.c 	if (mmu_spte_clear_track_bits(sptep))
sptep            1599 arch/x86/kvm/mmu.c 		rmap_remove(kvm, sptep);
sptep            1603 arch/x86/kvm/mmu.c static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
sptep            1605 arch/x86/kvm/mmu.c 	if (is_large_pte(*sptep)) {
sptep            1606 arch/x86/kvm/mmu.c 		WARN_ON(page_header(__pa(sptep))->role.level ==
sptep            1608 arch/x86/kvm/mmu.c 		drop_spte(kvm, sptep);
sptep            1616 arch/x86/kvm/mmu.c static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
sptep            1618 arch/x86/kvm/mmu.c 	if (__drop_large_spte(vcpu->kvm, sptep)) {
sptep            1619 arch/x86/kvm/mmu.c 		struct kvm_mmu_page *sp = page_header(__pa(sptep));
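
The hits around 1463-1619 iterate and prune the reverse map: rmap_head->val either stores a single sptep directly or, with its low bit set, points to a descriptor carrying several sptes, which is what rmap_get_first()/rmap_get_next() decode via iter->desc->sptes[iter->pos]. A simplified model of that tagged-pointer encoding; field names and the three-entry descriptor size here are illustrative:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTE_LIST_EXT 3

struct pte_list_desc {
	uint64_t *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct rmap_head {
	unsigned long val;	/* a single sptep, or (desc pointer | 1) */
};

/* Add one sptep to the reverse map, spilling to a descriptor when needed. */
static void pte_list_add(struct rmap_head *head, uint64_t *sptep)
{
	struct pte_list_desc *desc;
	int i;

	if (!head->val) {				/* empty: store inline */
		head->val = (unsigned long)sptep;
		return;
	}
	if (!(head->val & 1)) {				/* one entry: spill to a desc */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)head->val;
		desc->sptes[1] = sptep;
		head->val = (unsigned long)desc | 1;
		return;
	}
	desc = (struct pte_list_desc *)(head->val & ~1ul);
	for (i = 0; i < PTE_LIST_EXT; i++) {
		if (!desc->sptes[i]) {
			desc->sptes[i] = sptep;
			return;
		}
	}
	/* full descriptor: a fuller implementation would chain via ->more */
}

/* Visit every sptep, mirroring the for_each_rmap_spte() lines above. */
static void pte_list_walk(struct rmap_head *head)
{
	struct pte_list_desc *desc;
	int i;

	if (!head->val)
		return;
	if (!(head->val & 1)) {
		printf("spte at %p\n", (void *)head->val);
		return;
	}
	for (desc = (struct pte_list_desc *)(head->val & ~1ul); desc; desc = desc->more)
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; i++)
			printf("spte at %p\n", (void *)desc->sptes[i]);
}

int main(void)
{
	static uint64_t sptes[2];
	struct rmap_head head = { 0 };

	pte_list_add(&head, &sptes[0]);
	pte_list_add(&head, &sptes[1]);
	pte_list_walk(&head);
	return 0;
}
```
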
sptep            1639 arch/x86/kvm/mmu.c static bool spte_write_protect(u64 *sptep, bool pt_protect)
sptep            1641 arch/x86/kvm/mmu.c 	u64 spte = *sptep;
sptep            1647 arch/x86/kvm/mmu.c 	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
sptep            1653 arch/x86/kvm/mmu.c 	return mmu_spte_update(sptep, spte);
sptep            1660 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1664 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
sptep            1665 arch/x86/kvm/mmu.c 		flush |= spte_write_protect(sptep, pt_protect);
sptep            1670 arch/x86/kvm/mmu.c static bool spte_clear_dirty(u64 *sptep)
sptep            1672 arch/x86/kvm/mmu.c 	u64 spte = *sptep;
sptep            1674 arch/x86/kvm/mmu.c 	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
sptep            1678 arch/x86/kvm/mmu.c 	return mmu_spte_update(sptep, spte);
sptep            1681 arch/x86/kvm/mmu.c static bool spte_wrprot_for_clear_dirty(u64 *sptep)
sptep            1684 arch/x86/kvm/mmu.c 					       (unsigned long *)sptep);
sptep            1685 arch/x86/kvm/mmu.c 	if (was_writable && !spte_ad_enabled(*sptep))
sptep            1686 arch/x86/kvm/mmu.c 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
sptep            1699 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1703 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
sptep            1704 arch/x86/kvm/mmu.c 		if (spte_ad_need_write_protect(*sptep))
sptep            1705 arch/x86/kvm/mmu.c 			flush |= spte_wrprot_for_clear_dirty(sptep);
sptep            1707 arch/x86/kvm/mmu.c 			flush |= spte_clear_dirty(sptep);
sptep            1712 arch/x86/kvm/mmu.c static bool spte_set_dirty(u64 *sptep)
sptep            1714 arch/x86/kvm/mmu.c 	u64 spte = *sptep;
sptep            1716 arch/x86/kvm/mmu.c 	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
sptep            1725 arch/x86/kvm/mmu.c 	return mmu_spte_update(sptep, spte);
sptep            1730 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1734 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
sptep            1735 arch/x86/kvm/mmu.c 		if (spte_ad_enabled(*sptep))
sptep            1736 arch/x86/kvm/mmu.c 			flush |= spte_set_dirty(sptep);
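
Lines 1639-1736 rewrite individual SPTEs during write-protection and dirty-log handling, and each helper returns whether the entry actually changed so the enclosing rmap walk can accumulate a TLB-flush decision (flush |= ...). A tiny model of that return-whether-changed pattern, with invented bit names standing in for the real masks:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical SPTE bits, standing in for PT_WRITABLE_MASK and friends. */
#define SPTE_PRESENT  (1ull << 0)
#define SPTE_WRITABLE (1ull << 1)
#define SPTE_DIRTY    (1ull << 6)

/* Clear bits in one SPTE and report whether anything changed. */
static bool spte_clear_bits(uint64_t *sptep, uint64_t clear_bits)
{
	uint64_t old = *sptep;

	if (!(old & SPTE_PRESENT))
		return false;
	*sptep = old & ~clear_bits;
	return *sptep != old;
}

int main(void)
{
	uint64_t spte = SPTE_PRESENT | SPTE_WRITABLE | SPTE_DIRTY;
	bool flush = false;

	flush |= spte_clear_bits(&spte, SPTE_WRITABLE);	/* write-protect */
	flush |= spte_clear_bits(&spte, SPTE_DIRTY);	/* clear dirty for dirty logging */
	printf("spte=%#llx flush=%d\n", (unsigned long long)spte, flush);
	return 0;
}
```
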
sptep            1855 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1859 arch/x86/kvm/mmu.c 	while ((sptep = rmap_get_first(rmap_head, &iter))) {
sptep            1860 arch/x86/kvm/mmu.c 		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
sptep            1862 arch/x86/kvm/mmu.c 		pte_list_remove(rmap_head, sptep);
sptep            1880 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            1891 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
sptep            1893 arch/x86/kvm/mmu.c 			    sptep, *sptep, gfn, level);
sptep            1898 arch/x86/kvm/mmu.c 			pte_list_remove(rmap_head, sptep);
sptep            1901 arch/x86/kvm/mmu.c 			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
sptep            1909 arch/x86/kvm/mmu.c 			mmu_spte_clear_track_bits(sptep);
sptep            1910 arch/x86/kvm/mmu.c 			mmu_spte_set(sptep, new_spte);
sptep            2062 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            2066 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
sptep            2067 arch/x86/kvm/mmu.c 		young |= mmu_spte_age(sptep);
sptep            2077 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            2080 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep)
sptep            2081 arch/x86/kvm/mmu.c 		if (is_accessed_spte(*sptep))
sptep            2202 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            2205 arch/x86/kvm/mmu.c 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
sptep            2206 arch/x86/kvm/mmu.c 		mark_unsync(sptep);
sptep            2682 arch/x86/kvm/mmu.c 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
sptep            2700 arch/x86/kvm/mmu.c 	__shadow_walk_next(iterator, *iterator->sptep);
sptep            2703 arch/x86/kvm/mmu.c static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
sptep            2718 arch/x86/kvm/mmu.c 	mmu_spte_set(sptep, spte);
sptep            2720 arch/x86/kvm/mmu.c 	mmu_page_add_parent_pte(vcpu, sp, sptep);
sptep            2723 arch/x86/kvm/mmu.c 		mark_unsync(sptep);
sptep            2726 arch/x86/kvm/mmu.c static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
sptep            2729 arch/x86/kvm/mmu.c 	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
sptep            2739 arch/x86/kvm/mmu.c 		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
sptep            2743 arch/x86/kvm/mmu.c 		drop_parent_pte(child, sptep);
sptep            2784 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            2787 arch/x86/kvm/mmu.c 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
sptep            2788 arch/x86/kvm/mmu.c 		drop_parent_pte(sp, sptep);
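
Lines 2682-2788 belong to the shadow-walk iterator and to link_shadow_page()/validate_direct_spte(): at each level the iterator's sptep is simply "table base plus an index derived from the address", and a missing entry gets a freshly allocated lower-level table linked in via mmu_spte_set(). A toy two-level walk showing that shape; the 9-bit-per-level indexing and pointer-in-entry encoding are invented for the example, not KVM's real shadow page structures:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512
#define PRESENT 1ull

static uint64_t *alloc_table(void)
{
	return calloc(ENTRIES, sizeof(uint64_t));
}

static unsigned int index_of(uint64_t addr, int level)
{
	return (addr >> (12 + 9 * level)) & (ENTRIES - 1);
}

int main(void)
{
	uint64_t *table = alloc_table(), *sptep;
	uint64_t addr = 0x40201000ull;
	int level;

	for (level = 1; level >= 0; level--) {
		sptep = &table[index_of(addr, level)];	/* the "sptep" at this level */
		if (level == 0) {
			*sptep = 0xabcd000ull | PRESENT;	/* leaf mapping */
			printf("leaf spte = %#llx\n", (unsigned long long)*sptep);
			break;
		}
		if (!(*sptep & PRESENT)) {		/* link a new lower-level table */
			uint64_t *child = alloc_table();
			*sptep = (uint64_t)(uintptr_t)child | PRESENT;
		}
		table = (uint64_t *)(uintptr_t)(*sptep & ~PRESENT);
	}
	return 0;
}
```
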
sptep            3049 arch/x86/kvm/mmu.c static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
sptep            3058 arch/x86/kvm/mmu.c 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
sptep            3061 arch/x86/kvm/mmu.c 	sp = page_header(__pa(sptep));
sptep            3126 arch/x86/kvm/mmu.c 		if (!can_unsync && is_writable_pte(*sptep))
sptep            3147 arch/x86/kvm/mmu.c 	if (mmu_spte_update(sptep, spte))
sptep            3153 arch/x86/kvm/mmu.c static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
sptep            3164 arch/x86/kvm/mmu.c 		 *sptep, write_fault, gfn);
sptep            3166 arch/x86/kvm/mmu.c 	if (is_shadow_present_pte(*sptep)) {
sptep            3172 arch/x86/kvm/mmu.c 		    !is_large_pte(*sptep)) {
sptep            3174 arch/x86/kvm/mmu.c 			u64 pte = *sptep;
sptep            3177 arch/x86/kvm/mmu.c 			drop_parent_pte(child, sptep);
sptep            3179 arch/x86/kvm/mmu.c 		} else if (pfn != spte_to_pfn(*sptep)) {
sptep            3181 arch/x86/kvm/mmu.c 				 spte_to_pfn(*sptep), pfn);
sptep            3182 arch/x86/kvm/mmu.c 			drop_spte(vcpu->kvm, sptep);
sptep            3188 arch/x86/kvm/mmu.c 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
sptep            3200 arch/x86/kvm/mmu.c 	if (unlikely(is_mmio_spte(*sptep)))
sptep            3203 arch/x86/kvm/mmu.c 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
sptep            3204 arch/x86/kvm/mmu.c 	trace_kvm_mmu_set_spte(level, gfn, sptep);
sptep            3205 arch/x86/kvm/mmu.c 	if (!was_rmapped && is_large_pte(*sptep))
sptep            3208 arch/x86/kvm/mmu.c 	if (is_shadow_present_pte(*sptep)) {
sptep            3210 arch/x86/kvm/mmu.c 			rmap_count = rmap_add(vcpu, sptep, gfn);
sptep            3212 arch/x86/kvm/mmu.c 				rmap_recycle(vcpu, sptep, gfn);
sptep            3260 arch/x86/kvm/mmu.c 				  struct kvm_mmu_page *sp, u64 *sptep)
sptep            3267 arch/x86/kvm/mmu.c 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
sptep            3271 arch/x86/kvm/mmu.c 		if (is_shadow_present_pte(*spte) || spte == sptep) {
sptep            3282 arch/x86/kvm/mmu.c static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
sptep            3286 arch/x86/kvm/mmu.c 	sp = page_header(__pa(sptep));
sptep            3299 arch/x86/kvm/mmu.c 	__direct_pte_prefetch(vcpu, sp, sptep);
sptep            3306 arch/x86/kvm/mmu.c 	u64 spte = *it.sptep;
sptep            3350 arch/x86/kvm/mmu.c 		drop_large_spte(vcpu, it.sptep);
sptep            3351 arch/x86/kvm/mmu.c 		if (!is_shadow_present_pte(*it.sptep)) {
sptep            3355 arch/x86/kvm/mmu.c 			link_shadow_page(vcpu, it.sptep, sp);
sptep            3361 arch/x86/kvm/mmu.c 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
sptep            3364 arch/x86/kvm/mmu.c 	direct_pte_prefetch(vcpu, it.sptep);
sptep            3486 arch/x86/kvm/mmu.c 			u64 *sptep, u64 old_spte, u64 new_spte)
sptep            3504 arch/x86/kvm/mmu.c 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
sptep            3512 arch/x86/kvm/mmu.c 		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
sptep            3561 arch/x86/kvm/mmu.c 		sp = page_header(__pa(iterator.sptep));
sptep            3621 arch/x86/kvm/mmu.c 							iterator.sptep, spte,
sptep            3634 arch/x86/kvm/mmu.c 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
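
The hits around 3486-3634 are the fast page fault path: read the SPTE during a lockless shadow walk, compute the fixed-up value (for example, restoring the writable bit), and install it with cmpxchg64 so a concurrent zap or update makes the fix fail instead of being silently overwritten. A sketch of that compare-and-swap pattern with C11 atomics and a hypothetical writable bit:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_WRITABLE (1ull << 1)	/* hypothetical bit layout */

/*
 * Install new_spte only if the entry still holds old_spte; a false return
 * means another CPU changed it and the caller should retry the walk.
 */
static bool fast_fix_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
			  uint64_t new_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}

int main(void)
{
	_Atomic uint64_t spte = 0x1000;
	uint64_t old = atomic_load(&spte);

	if (fast_fix_spte(&spte, old, old | SPTE_WRITABLE))
		printf("fixed: %#llx\n", (unsigned long long)atomic_load(&spte));
	else
		printf("raced, retry the walk\n");
	return 0;
}
```
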
sptep            4043 arch/x86/kvm/mmu.c walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
sptep            4059 arch/x86/kvm/mmu.c 		spte = mmu_spte_get_lockless(iterator.sptep);
sptep            4083 arch/x86/kvm/mmu.c 	*sptep = spte;
sptep            4151 arch/x86/kvm/mmu.c 		clear_sp_write_flooding_count(iterator.sptep);
sptep            4463 arch/x86/kvm/mmu.c static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
sptep            4466 arch/x86/kvm/mmu.c 	if (unlikely(is_mmio_spte(*sptep))) {
sptep            4467 arch/x86/kvm/mmu.c 		if (gfn != get_mmio_spte_gfn(*sptep)) {
sptep            4468 arch/x86/kvm/mmu.c 			mmu_spte_clear_no_track(sptep);
sptep            4473 arch/x86/kvm/mmu.c 		mark_mmio_spte(vcpu, sptep, gfn, access);
sptep            6015 arch/x86/kvm/mmu.c 	u64 *sptep;
sptep            6022 arch/x86/kvm/mmu.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
sptep            6023 arch/x86/kvm/mmu.c 		sp = page_header(__pa(sptep));
sptep            6024 arch/x86/kvm/mmu.c 		pfn = spte_to_pfn(*sptep);
sptep            6036 arch/x86/kvm/mmu.c 			pte_list_remove(rmap_head, sptep);
sptep              32 arch/x86/kvm/mmu_audit.c typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
sptep              93 arch/x86/kvm/mmu_audit.c static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
sptep             100 arch/x86/kvm/mmu_audit.c 	sp = page_header(__pa(sptep));
sptep             110 arch/x86/kvm/mmu_audit.c 	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
sptep             113 arch/x86/kvm/mmu_audit.c 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
sptep             120 arch/x86/kvm/mmu_audit.c 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
sptep             123 arch/x86/kvm/mmu_audit.c 			     hpa, *sptep);
sptep             126 arch/x86/kvm/mmu_audit.c static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
sptep             135 arch/x86/kvm/mmu_audit.c 	rev_sp = page_header(__pa(sptep));
sptep             136 arch/x86/kvm/mmu_audit.c 	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
sptep             145 arch/x86/kvm/mmu_audit.c 		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
sptep             155 arch/x86/kvm/mmu_audit.c 			     *sptep);
sptep             160 arch/x86/kvm/mmu_audit.c static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
sptep             162 arch/x86/kvm/mmu_audit.c 	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
sptep             163 arch/x86/kvm/mmu_audit.c 		inspect_spte_has_rmap(vcpu->kvm, sptep);
sptep             166 arch/x86/kvm/mmu_audit.c static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
sptep             168 arch/x86/kvm/mmu_audit.c 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
sptep             193 arch/x86/kvm/mmu_audit.c 	u64 *sptep;
sptep             205 arch/x86/kvm/mmu_audit.c 	for_each_rmap_spte(rmap_head, &iter, sptep) {
sptep             206 arch/x86/kvm/mmu_audit.c 		if (is_writable_pte(*sptep))
sptep             224 arch/x86/kvm/mmu_audit.c static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
sptep             226 arch/x86/kvm/mmu_audit.c 	audit_sptes_have_rmaps(vcpu, sptep, level);
sptep             227 arch/x86/kvm/mmu_audit.c 	audit_mappings(vcpu, sptep, level);
sptep             228 arch/x86/kvm/mmu_audit.c 	audit_spte_after_sync(vcpu, sptep, level);
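
mmu_audit.c applies inspect_spte_fn callbacks (audit_mappings, audit_sptes_have_rmaps, audit_spte_after_sync) to every installed sptep it walks. A minimal walker in the same shape, over a flat toy table rather than the real shadow MMU structures:

```c
#include <stdint.h>
#include <stdio.h>

#define PRESENT 1ull

typedef void (*inspect_spte_fn)(uint64_t *sptep, int level);

static void audit_present(uint64_t *sptep, int level)
{
	printf("level %d: spte %#llx\n", level, (unsigned long long)*sptep);
}

/* Hand every present entry to the inspect callback, as audit_spte() does. */
static void walk_table(uint64_t *table, int nr, int level, inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < nr; i++)
		if (table[i] & PRESENT)
			fn(&table[i], level);
}

int main(void)
{
	uint64_t table[4] = { 0, 0x2000 | PRESENT, 0, 0x5000 | PRESENT };

	walk_table(table, 4, 1, audit_present);
	return 0;
}
```
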
sptep             205 arch/x86/kvm/mmutrace.h 	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
sptep             206 arch/x86/kvm/mmutrace.h 	TP_ARGS(sptep, gfn, access, gen),
sptep             209 arch/x86/kvm/mmutrace.h 		__field(void *, sptep)
sptep             216 arch/x86/kvm/mmutrace.h 		__entry->sptep = sptep;
sptep             222 arch/x86/kvm/mmutrace.h 	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
sptep             253 arch/x86/kvm/mmutrace.h 		 u64 *sptep, u64 old_spte, bool retry),
sptep             254 arch/x86/kvm/mmutrace.h 	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
sptep             260 arch/x86/kvm/mmutrace.h 		__field(u64 *, sptep)
sptep             270 arch/x86/kvm/mmutrace.h 		__entry->sptep = sptep;
sptep             272 arch/x86/kvm/mmutrace.h 		__entry->new_spte = *sptep;
sptep             279 arch/x86/kvm/mmutrace.h 		  kvm_mmu_trace_pferr_flags), __entry->sptep,
sptep             331 arch/x86/kvm/mmutrace.h 	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
sptep             332 arch/x86/kvm/mmutrace.h 	TP_ARGS(level, gfn, sptep),
sptep             337 arch/x86/kvm/mmutrace.h 		__field(u64, sptep)
sptep             347 arch/x86/kvm/mmutrace.h 		__entry->spte = *sptep;
sptep             348 arch/x86/kvm/mmutrace.h 		__entry->sptep = virt_to_phys(sptep);
sptep             361 arch/x86/kvm/mmutrace.h 		  __entry->level, __entry->sptep
sptep             579 arch/x86/kvm/paging_tmpl.h 				u64 *sptep)
sptep             586 arch/x86/kvm/paging_tmpl.h 	sp = page_header(__pa(sptep));
sptep             592 arch/x86/kvm/paging_tmpl.h 		return __direct_pte_prefetch(vcpu, sp, sptep);
sptep             594 arch/x86/kvm/paging_tmpl.h 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
sptep             598 arch/x86/kvm/paging_tmpl.h 		if (spte == sptep)
sptep             648 arch/x86/kvm/paging_tmpl.h 		clear_sp_write_flooding_count(it.sptep);
sptep             649 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
sptep             652 arch/x86/kvm/paging_tmpl.h 		if (!is_shadow_present_pte(*it.sptep)) {
sptep             666 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
sptep             679 arch/x86/kvm/paging_tmpl.h 		clear_sp_write_flooding_count(it.sptep);
sptep             691 arch/x86/kvm/paging_tmpl.h 		validate_direct_spte(vcpu, it.sptep, direct_access);
sptep             693 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
sptep             695 arch/x86/kvm/paging_tmpl.h 		if (!is_shadow_present_pte(*it.sptep)) {
sptep             698 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
sptep             704 arch/x86/kvm/paging_tmpl.h 	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
sptep             706 arch/x86/kvm/paging_tmpl.h 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
sptep             897 arch/x86/kvm/paging_tmpl.h 	u64 *sptep;
sptep             915 arch/x86/kvm/paging_tmpl.h 		sptep = iterator.sptep;
sptep             917 arch/x86/kvm/paging_tmpl.h 		sp = page_header(__pa(sptep));
sptep             918 arch/x86/kvm/paging_tmpl.h 		if (is_last_spte(*sptep, level)) {
sptep             926 arch/x86/kvm/paging_tmpl.h 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
sptep             928 arch/x86/kvm/paging_tmpl.h 			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
sptep             939 arch/x86/kvm/paging_tmpl.h 			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
sptep             942 arch/x86/kvm/paging_tmpl.h 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
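
The paging_tmpl.h hits around 579-598 compute a prefetch window from the faulting sptep's offset inside its shadow page: the index is rounded down to a PTE_PREFETCH_NUM-aligned group and the neighbouring entries are considered for prefetch. A small standalone version of that arithmetic; the table size is invented, and the real code installs neighbouring sptes rather than printing them:

```c
#include <stdint.h>
#include <stdio.h>

#define PTE_PREFETCH_NUM 8
#define TABLE_ENTRIES    512

int main(void)
{
	uint64_t spt[TABLE_ENTRIES] = { 0 };
	uint64_t *sptep = &spt[203];		/* pretend this sptep faulted */
	unsigned int i, start;

	/* round the faulting index down to an aligned prefetch group */
	start = (unsigned int)(sptep - spt) & ~(PTE_PREFETCH_NUM - 1);
	for (i = start; i < start + PTE_PREFETCH_NUM; i++) {
		if (&spt[i] == sptep)
			continue;		/* the faulting entry is handled separately */
		printf("would prefetch index %u\n", i);
	}
	return 0;
}
```
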