old_spte          845 arch/x86/kvm/mmu.c 	u64 old_spte = *sptep;
old_spte          849 arch/x86/kvm/mmu.c 	if (!is_shadow_present_pte(old_spte)) {
old_spte          851 arch/x86/kvm/mmu.c 		return old_spte;
old_spte          854 arch/x86/kvm/mmu.c 	if (!spte_has_volatile_bits(old_spte))
old_spte          857 arch/x86/kvm/mmu.c 		old_spte = __update_clear_spte_slow(sptep, new_spte);
old_spte          859 arch/x86/kvm/mmu.c 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
old_spte          861 arch/x86/kvm/mmu.c 	return old_spte;
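
The hits at mmu.c lines 845-861 come from the raw SPTE update helper: a plain store is enough when the old entry is not present or has no bits the hardware can still change, otherwise the update goes through the slow path (__update_clear_spte_slow) so that Accessed/Dirty bits set concurrently by hardware are not lost, and the pfn is expected to stay the same. Below is a minimal standalone model of that split using C11 atomics; the bit layout, mask value and helper bodies are assumptions of this sketch, not the kernel's definitions.

/* spte_update_model.c - model of the fast/slow SPTE update split.        */
/* Bit positions and helpers are illustrative assumptions, not the        */
/* kernel's definitions.                                                   */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)             /* assumed "shadow present" bit */
#define SPTE_ACCESSED  (1ull << 5)             /* bit hardware may set for us  */
#define SPTE_DIRTY     (1ull << 6)             /* bit hardware may set for us  */
#define SPTE_PFN_MASK  0x000ffffffffff000ull   /* assumed pfn field            */

static bool is_shadow_present_pte(uint64_t spte) { return spte & SPTE_PRESENT; }
static uint64_t spte_to_pfn(uint64_t spte) { return (spte & SPTE_PFN_MASK) >> 12; }

/* In this model an SPTE is "volatile" when hardware may still flip A/D bits. */
static bool spte_has_volatile_bits(uint64_t spte)
{
        return is_shadow_present_pte(spte) &&
               (~spte & (SPTE_ACCESSED | SPTE_DIRTY));
}

/* Returns the old value; volatile entries go through an atomic exchange. */
static uint64_t spte_update_no_track(_Atomic uint64_t *sptep, uint64_t new_spte)
{
        uint64_t old_spte = atomic_load_explicit(sptep, memory_order_relaxed);

        if (!is_shadow_present_pte(old_spte)) {
                atomic_store_explicit(sptep, new_spte, memory_order_relaxed);
                return old_spte;
        }

        if (!spte_has_volatile_bits(old_spte)) {
                /* Fast path: nothing can change under us, plain store.        */
                atomic_store_explicit(sptep, new_spte, memory_order_relaxed);
        } else {
                /* Slow path: exchange, so A/D bits set meanwhile are captured. */
                old_spte = atomic_exchange_explicit(sptep, new_spte,
                                                    memory_order_relaxed);
        }

        if (spte_to_pfn(old_spte) != spte_to_pfn(new_spte))
                fprintf(stderr, "unexpected pfn change\n");  /* stands in for WARN_ON */

        return old_spte;
}

int main(void)
{
        _Atomic uint64_t spte = SPTE_PRESENT | (0x1234ull << 12);
        uint64_t old = spte_update_no_track(&spte, SPTE_PRESENT | SPTE_ACCESSED |
                                            (0x1234ull << 12));

        printf("old spte: %#llx\n", (unsigned long long)old);
        return 0;
}

The exchange on the slow path is what lets the caller see exactly the bits that were live in the entry at the moment it was replaced.
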
old_spte          878 arch/x86/kvm/mmu.c 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
old_spte          880 arch/x86/kvm/mmu.c 	if (!is_shadow_present_pte(old_spte))
old_spte          888 arch/x86/kvm/mmu.c 	if (spte_can_locklessly_be_made_writable(old_spte) &&
old_spte          897 arch/x86/kvm/mmu.c 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
old_spte          899 arch/x86/kvm/mmu.c 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
old_spte          902 arch/x86/kvm/mmu.c 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
old_spte          904 arch/x86/kvm/mmu.c 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
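
The hits at mmu.c lines 878-904 are the tracking wrapper: after the raw update, Accessed or Dirty state present in the old SPTE but absent from the new one is forwarded to the backing pfn, and removing write access makes the caller flush TLBs. A standalone sketch of that shape follows; it takes both SPTE values as parameters, reduces the spte_can_locklessly_be_made_writable() test to a plain writability check, and the set_pfn_* callbacks are stand-ins for kvm_set_pfn_accessed()/kvm_set_pfn_dirty().

/* spte_track_model.c - propagating A/D info lost by an SPTE update.      */
/* Bit layout and callbacks are assumptions of this sketch.               */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)
#define SPTE_WRITABLE  (1ull << 1)
#define SPTE_ACCESSED  (1ull << 5)
#define SPTE_DIRTY     (1ull << 6)
#define SPTE_PFN_MASK  0x000ffffffffff000ull

static bool present(uint64_t s)  { return s & SPTE_PRESENT; }
static bool writable(uint64_t s) { return s & SPTE_WRITABLE; }
static bool accessed(uint64_t s) { return s & SPTE_ACCESSED; }
static bool dirty(uint64_t s)    { return s & SPTE_DIRTY; }
static uint64_t pfn(uint64_t s)  { return (s & SPTE_PFN_MASK) >> 12; }

/* Stand-ins for kvm_set_pfn_accessed()/kvm_set_pfn_dirty(). */
static void set_pfn_accessed(uint64_t p) { printf("pfn %#llx accessed\n", (unsigned long long)p); }
static void set_pfn_dirty(uint64_t p)    { printf("pfn %#llx dirty\n", (unsigned long long)p); }

/*
 * Returns true when the caller must flush TLBs because write access was
 * removed; also forwards Accessed/Dirty state that the new SPTE drops.
 */
static bool track_spte_change(uint64_t old_spte, uint64_t new_spte)
{
        bool flush = false;

        if (!present(old_spte))
                return false;

        if (writable(old_spte) && !writable(new_spte))
                flush = true;

        if (accessed(old_spte) && !accessed(new_spte))
                set_pfn_accessed(pfn(old_spte));

        if (dirty(old_spte) && !dirty(new_spte))
                set_pfn_dirty(pfn(old_spte));

        return flush;
}

int main(void)
{
        uint64_t old_spte = SPTE_PRESENT | SPTE_WRITABLE | SPTE_ACCESSED |
                            SPTE_DIRTY | (0x42ull << 12);
        uint64_t new_spte = SPTE_PRESENT | (0x42ull << 12);

        printf("flush needed: %d\n", track_spte_change(old_spte, new_spte));
        return 0;
}
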
old_spte          919 arch/x86/kvm/mmu.c 	u64 old_spte = *sptep;
old_spte          921 arch/x86/kvm/mmu.c 	if (!spte_has_volatile_bits(old_spte))
old_spte          924 arch/x86/kvm/mmu.c 		old_spte = __update_clear_spte_slow(sptep, 0ull);
old_spte          926 arch/x86/kvm/mmu.c 	if (!is_shadow_present_pte(old_spte))
old_spte          929 arch/x86/kvm/mmu.c 	pfn = spte_to_pfn(old_spte);
old_spte          938 arch/x86/kvm/mmu.c 	if (is_accessed_spte(old_spte))
old_spte          941 arch/x86/kvm/mmu.c 	if (is_dirty_spte(old_spte))
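
The hits at mmu.c lines 919-941 are the clearing path: the entry is zapped, and whatever Accessed/Dirty information the old value carried is reported to the backing pfn before it disappears. The sketch below always clears with an atomic exchange; the real helper also has a cheaper non-atomic path for entries without volatile bits, and the bit layout here is again an assumption.

/* spte_clear_model.c - dropping an SPTE while keeping its A/D information. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)
#define SPTE_ACCESSED  (1ull << 5)
#define SPTE_DIRTY     (1ull << 6)
#define SPTE_PFN_MASK  0x000ffffffffff000ull

/* Stand-ins for kvm_set_pfn_accessed()/kvm_set_pfn_dirty(). */
static void set_pfn_accessed(uint64_t pfn) { printf("pfn %#llx accessed\n", (unsigned long long)pfn); }
static void set_pfn_dirty(uint64_t pfn)    { printf("pfn %#llx dirty\n", (unsigned long long)pfn); }

/*
 * Clears *sptep and returns whether a present mapping was removed.  The
 * atomic exchange plays the role of the "slow" clear: it captures any
 * Accessed/Dirty bits the hardware set right up to the moment the entry
 * disappears, so they can still be forwarded to the backing pfn.
 */
static bool spte_clear_track_bits(_Atomic uint64_t *sptep)
{
        uint64_t old_spte = atomic_exchange_explicit(sptep, 0ull,
                                                     memory_order_relaxed);
        uint64_t pfn;

        if (!(old_spte & SPTE_PRESENT))
                return false;

        pfn = (old_spte & SPTE_PFN_MASK) >> 12;

        if (old_spte & SPTE_ACCESSED)
                set_pfn_accessed(pfn);
        if (old_spte & SPTE_DIRTY)
                set_pfn_dirty(pfn);

        return true;
}

int main(void)
{
        _Atomic uint64_t spte = SPTE_PRESENT | SPTE_ACCESSED | SPTE_DIRTY |
                                (0x99ull << 12);

        printf("was present: %d\n", spte_clear_track_bits(&spte));
        return 0;
}
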
old_spte         3486 arch/x86/kvm/mmu.c 			u64 *sptep, u64 old_spte, u64 new_spte)
old_spte         3504 arch/x86/kvm/mmu.c 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
old_spte         3507 arch/x86/kvm/mmu.c 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
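
The hit at mmu.c line 3504 is the heart of the lockless fast page fault path: cmpxchg64() installs the fixed-up SPTE only if the entry still holds the value read earlier, and if the fix grants write access the page is marked dirty (line 3507). Here is a standalone model using a C11 compare-and-exchange; passing the gfn in as a parameter is a simplification of this sketch, since the real function resolves it from the shadow page.

/* fast_pf_model.c - lockless "fix the SPTE only if it did not change".   */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)
#define SPTE_WRITABLE  (1ull << 1)

/* Stand-in for the kernel's dirty-page logging of the guest frame. */
static void mark_page_dirty(uint64_t gfn) { printf("gfn %#llx dirtied\n", (unsigned long long)gfn); }

/*
 * Equivalent of the cmpxchg64() idiom in the hit above: the new SPTE is
 * installed only if the entry still holds the value we read earlier; any
 * concurrent change makes the fast path back off and retry.  Returns true
 * when the fix was applied.
 */
static bool fast_pf_fix_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
                             uint64_t new_spte, uint64_t gfn)
{
        uint64_t expected = old_spte;

        if (!atomic_compare_exchange_strong(sptep, &expected, new_spte))
                return false;

        /* Write access was just granted: the guest page may now be dirtied. */
        if ((new_spte & SPTE_WRITABLE) && !(old_spte & SPTE_WRITABLE))
                mark_page_dirty(gfn);

        return true;
}

int main(void)
{
        _Atomic uint64_t spte = SPTE_PRESENT;          /* read-only mapping */
        uint64_t old_spte = SPTE_PRESENT;
        uint64_t new_spte = SPTE_PRESENT | SPTE_WRITABLE;

        printf("fixed: %d\n", fast_pf_fix_spte(&spte, old_spte, new_spte, 0x7));
        return 0;
}
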
old_spte          253 arch/x86/kvm/mmutrace.h 		 u64 *sptep, u64 old_spte, bool retry),
old_spte          254 arch/x86/kvm/mmutrace.h 	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
old_spte          261 arch/x86/kvm/mmutrace.h 		__field(u64, old_spte)
old_spte          271 arch/x86/kvm/mmutrace.h 		__entry->old_spte = old_spte;
old_spte          280 arch/x86/kvm/mmutrace.h 		  __entry->old_spte, __entry->new_spte,
old_spte          281 arch/x86/kvm/mmutrace.h 		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
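
The mmutrace.h hits belong to the fast_page_fault tracepoint, which records the faulting address, the error code, the old SPTE value passed in and the new value read back from the entry. TRACE_EVENT() generates the real record and formatting; the plain-C struct below only mirrors the fields visible in the hits, and the field types and vcpu_id member are guesses of this sketch.

/* fast_pf_trace_model.c - plain-C picture of what the tracepoint records. */
/* In the kernel the TRACE_EVENT() machinery generates the record and the  */
/* printing; this struct only mirrors the fields visible in the hits.      */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fast_page_fault_record {
        int      vcpu_id;      /* assumed field, not shown in the hits        */
        uint64_t cr2_or_gpa;
        uint32_t error_code;
        uint64_t *sptep;
        uint64_t old_spte;
        uint64_t new_spte;     /* read back from *sptep when the event fires  */
        bool     retry;
};

static void trace_fast_page_fault(struct fast_page_fault_record *e,
                                  int vcpu_id, uint64_t cr2_or_gpa,
                                  uint32_t error_code, uint64_t *sptep,
                                  uint64_t old_spte, bool retry)
{
        e->vcpu_id    = vcpu_id;
        e->cr2_or_gpa = cr2_or_gpa;
        e->error_code = error_code;
        e->sptep      = sptep;
        e->old_spte   = old_spte;
        e->new_spte   = *sptep;        /* current value, after the fix attempt */
        e->retry      = retry;

        printf("vcpu %d gpa %#llx err %#x old %#llx new %#llx retry %d\n",
               e->vcpu_id, (unsigned long long)e->cr2_or_gpa,
               (unsigned)e->error_code, (unsigned long long)e->old_spte,
               (unsigned long long)e->new_spte, e->retry);
}

int main(void)
{
        uint64_t spte = 0x8000000000000007ull;   /* arbitrary example value */
        struct fast_page_fault_record rec;

        trace_fast_page_fault(&rec, 0, 0xdeadbeef000ull, 0x2, &spte, 0x5ull, true);
        return 0;
}
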