new_spte          833 arch/x86/kvm/mmu.c static void mmu_spte_set(u64 *sptep, u64 new_spte)
new_spte          836 arch/x86/kvm/mmu.c 	__set_spte(sptep, new_spte);
new_spte          843 arch/x86/kvm/mmu.c static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
new_spte          847 arch/x86/kvm/mmu.c 	WARN_ON(!is_shadow_present_pte(new_spte));
new_spte          850 arch/x86/kvm/mmu.c 		mmu_spte_set(sptep, new_spte);
new_spte          855 arch/x86/kvm/mmu.c 		__update_clear_spte_fast(sptep, new_spte);
new_spte          857 arch/x86/kvm/mmu.c 		old_spte = __update_clear_spte_slow(sptep, new_spte);
new_spte          859 arch/x86/kvm/mmu.c 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
new_spte          875 arch/x86/kvm/mmu.c static bool mmu_spte_update(u64 *sptep, u64 new_spte)
new_spte          878 arch/x86/kvm/mmu.c 	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
new_spte          889 arch/x86/kvm/mmu.c 	      !is_writable_pte(new_spte))
new_spte          897 arch/x86/kvm/mmu.c 	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
new_spte          902 arch/x86/kvm/mmu.c 	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
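
The hits above (mmu.c:833-902) are the shadow-PTE write path: mmu_spte_set() installs a value into a not-yet-present SPTE, mmu_spte_update_no_track() swaps in the new value while insisting the pfn did not change, and mmu_spte_update() additionally reports a needed TLB flush and hands Accessed/Dirty information back to the backing page when the transition clears those bits. A minimal userspace sketch of that transition check, assuming the x86 PTE bit layout (bit 0 present, bit 1 writable, bit 5 accessed, bit 6 dirty) and with kvm_set_pfn_accessed()/kvm_set_pfn_dirty() reduced to printouts:

/* Sketch only: models the A/D transition tracking in mmu_spte_update(). */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SPTE_PRESENT   (1ULL << 0)   /* assumed: x86 P bit   */
#define SPTE_WRITABLE  (1ULL << 1)   /* assumed: x86 R/W bit */
#define SPTE_ACCESSED  (1ULL << 5)   /* assumed: x86 A bit   */
#define SPTE_DIRTY     (1ULL << 6)   /* assumed: x86 D bit   */
#define SPTE_PFN_MASK  0x000ffffffffff000ULL

/* Stand-ins for kvm_set_pfn_accessed()/kvm_set_pfn_dirty(). */
static void set_pfn_accessed(uint64_t pfn) { printf("pfn %#llx accessed\n", (unsigned long long)pfn); }
static void set_pfn_dirty(uint64_t pfn)    { printf("pfn %#llx dirty\n", (unsigned long long)pfn); }

/* Returns true if a remote TLB flush is needed: a writable mapping went away. */
static bool spte_update(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte = *sptep;
	uint64_t pfn = (old_spte & SPTE_PFN_MASK) >> 12;
	bool flush = false;

	*sptep = new_spte;	/* the real code uses xchg/cmpxchg variants */

	if ((old_spte & SPTE_WRITABLE) && !(new_spte & SPTE_WRITABLE))
		flush = true;

	/* Hand A/D information back to the backing page before it is lost. */
	if ((old_spte & SPTE_ACCESSED) && !(new_spte & SPTE_ACCESSED))
		set_pfn_accessed(pfn);
	if ((old_spte & SPTE_DIRTY) && !(new_spte & SPTE_DIRTY))
		set_pfn_dirty(pfn);

	return flush;
}

int main(void)
{
	uint64_t spte = 0x123000ULL | SPTE_PRESENT | SPTE_WRITABLE | SPTE_ACCESSED | SPTE_DIRTY;
	spte_update(&spte, 0x123000ULL | SPTE_PRESENT);	/* write-protect and clear A/D */
	return 0;
}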
new_spte          993 arch/x86/kvm/mmu.c 	u64 new_spte = spte;
new_spte         1000 arch/x86/kvm/mmu.c 	new_spte &= ~shadow_acc_track_mask;
new_spte         1001 arch/x86/kvm/mmu.c 	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
new_spte         1003 arch/x86/kvm/mmu.c 	new_spte |= saved_bits;
new_spte         1005 arch/x86/kvm/mmu.c 	return new_spte;
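
restore_acc_track_spte() (mmu.c:993-1005) is the inverse of mark_spte_for_access_track(): when a PTE cannot carry hardware A/D bits, the original permissions are parked in a "saved bits" field, the low bits are cleared so the next access faults, and this helper copies them back and drops the marker. A self-contained sketch of that round trip; the mask and shift values below (R/W/X in bits 0-2, saved copy at bit 52, marker at bit 62) are illustrative assumptions, not the kernel's actual shadow_acc_track_* values:

/* Sketch only: access-tracking save/restore of permission bits in an SPTE. */
#include <stdint.h>
#include <assert.h>

#define RWX_MASK          0x7ULL        /* assumed low R/W/X permission bits   */
#define SAVED_BITS_SHIFT  52            /* assumed software-available bit slot */
#define ACC_TRACK_MARKER  (1ULL << 62)  /* assumed "access tracked" flag       */

static uint64_t mark_for_access_track(uint64_t spte)
{
	uint64_t saved = spte & RWX_MASK;

	spte &= ~RWX_MASK;                       /* force the next access to fault    */
	spte |= saved << SAVED_BITS_SHIFT;       /* remember the original permissions */
	spte |= ACC_TRACK_MARKER;
	return spte;
}

static uint64_t restore_acc_track(uint64_t spte)
{
	uint64_t saved = (spte >> SAVED_BITS_SHIFT) & RWX_MASK;

	spte &= ~ACC_TRACK_MARKER;               /* ~shadow_acc_track_mask */
	spte &= ~(RWX_MASK << SAVED_BITS_SHIFT); /* drop the saved copy    */
	spte |= saved;                           /* put permissions back   */
	return spte;
}

int main(void)
{
	uint64_t spte = 0x0000000123456007ULL;   /* some mapping with R/W/X set */
	assert(restore_acc_track(mark_for_access_track(spte)) == spte);
	return 0;
}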
new_spte         1883 arch/x86/kvm/mmu.c 	u64 new_spte;
new_spte         1901 arch/x86/kvm/mmu.c 			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
new_spte         1902 arch/x86/kvm/mmu.c 			new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte         1904 arch/x86/kvm/mmu.c 			new_spte &= ~PT_WRITABLE_MASK;
new_spte         1905 arch/x86/kvm/mmu.c 			new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte         1907 arch/x86/kvm/mmu.c 			new_spte = mark_spte_for_access_track(new_spte);
new_spte         1910 arch/x86/kvm/mmu.c 			mmu_spte_set(sptep, new_spte);
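
The mmu.c:1883-1910 hits are the change_pte rmap handler: the SPTE is rebuilt around the replacement pfn, writability (both guest- and host-side) is stripped so the next write faults, and the result is marked for access tracking before being installed with mmu_spte_set(). A sketch of that rebuild, with mask names and bit positions chosen here only for illustration (SPTE_HOST_WRITEABLE in particular is represented by an assumed stand-in bit):

/* Sketch only: rebuilding an SPTE around a new pfn, as in the change_pte
 * rmap handler above. Mask values are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT         12
#define BASE_ADDR_MASK     0x000ffffffffff000ULL  /* assumed PT64_BASE_ADDR_MASK      */
#define WRITABLE_MASK      (1ULL << 1)            /* assumed PT_WRITABLE_MASK         */
#define HOST_WRITABLE_MASK (1ULL << 57)           /* stand-in for SPTE_HOST_WRITEABLE */

static uint64_t spte_for_new_pfn(uint64_t spte, uint64_t new_pfn)
{
	uint64_t new_spte = spte & ~BASE_ADDR_MASK;   /* keep everything but the PA      */

	new_spte |= new_pfn << PAGE_SHIFT;            /* point at the replacement page   */
	new_spte &= ~WRITABLE_MASK;                   /* force a write fault first       */
	new_spte &= ~HOST_WRITABLE_MASK;              /* ...which cannot be fixed fast   */
	return new_spte;
}

int main(void)
{
	uint64_t spte = 0x0000000000123067ULL;        /* old mapping, writable, A/D set */
	printf("new spte = %#llx\n", (unsigned long long)spte_for_new_pfn(spte, 0x456));
	return 0;
}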
new_spte         3486 arch/x86/kvm/mmu.c 			u64 *sptep, u64 old_spte, u64 new_spte)
new_spte         3504 arch/x86/kvm/mmu.c 	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
new_spte         3507 arch/x86/kvm/mmu.c 	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
new_spte         3554 arch/x86/kvm/mmu.c 		u64 new_spte;
new_spte         3580 arch/x86/kvm/mmu.c 		new_spte = spte;
new_spte         3583 arch/x86/kvm/mmu.c 			new_spte = restore_acc_track_spte(new_spte);
new_spte         3593 arch/x86/kvm/mmu.c 			new_spte |= PT_WRITABLE_MASK;
new_spte         3611 arch/x86/kvm/mmu.c 		if (new_spte == spte ||
new_spte         3612 arch/x86/kvm/mmu.c 		    !is_access_allowed(error_code, new_spte))
new_spte         3622 arch/x86/kvm/mmu.c 							new_spte);
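
The fast_page_fault() hits (mmu.c:3554-3622) show the two lockless repairs it attempts: restoring an access-tracked SPTE and re-adding PT_WRITABLE_MASK when the page was only write-protected for dirty logging; if the resulting new_spte is unchanged or still does not satisfy the faulting access, the slow path runs instead. A compressed sketch of that decision, taking the error-code bit from the x86 page-fault layout (bit 1 = write) and reducing the "can be made writable locklessly" test to one assumed flag; the access-track restore step is left to the earlier sketch:

/* Sketch only: the "can this fault be fixed without mmu_lock?" decision. */
#include <stdint.h>
#include <stdbool.h>

#define ERR_WRITE        (1U << 1)         /* x86 PFERR write bit            */
#define SPTE_PRESENT     (1ULL << 0)
#define SPTE_WRITABLE    (1ULL << 1)
#define SPTE_LOCKLESS_OK (1ULL << 53)      /* assumed: host+mmu writable flag */

static bool access_allowed(uint32_t error_code, uint64_t spte)
{
	if (!(spte & SPTE_PRESENT))
		return false;
	if ((error_code & ERR_WRITE) && !(spte & SPTE_WRITABLE))
		return false;
	return true;	/* fetch/user checks elided in this sketch */
}

/* Returns true if new_spte is worth publishing with the cmpxchg sketch above. */
static bool try_fast_fix(uint32_t error_code, uint64_t spte, uint64_t *new_spte)
{
	uint64_t fixed = spte;

	if ((error_code & ERR_WRITE) && (spte & SPTE_LOCKLESS_OK))
		fixed |= SPTE_WRITABLE;	/* undo dirty-log write protection */

	if (fixed == spte || !access_allowed(error_code, fixed))
		return false;		/* nothing we can repair locklessly */

	*new_spte = fixed;
	return true;
}

int main(void)
{
	uint64_t n;
	return !try_fast_fix(ERR_WRITE, SPTE_PRESENT | SPTE_LOCKLESS_OK, &n);
}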
new_spte          262 arch/x86/kvm/mmutrace.h 		__field(u64, new_spte)
new_spte          272 arch/x86/kvm/mmutrace.h 		__entry->new_spte = *sptep;
new_spte          280 arch/x86/kvm/mmutrace.h 		  __entry->old_spte, __entry->new_spte,
new_spte          281 arch/x86/kvm/mmutrace.h 		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
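
The mmutrace.h hits belong to the fast_page_fault tracepoint, which records the SPTE before and after the fix together with whether each value would have satisfied the faulting access (__spte_satisfied()). A userspace approximation of how that before/after summary can be produced for a write fault; the predicate is deliberately reduced to "present and writable" and is an assumption, not the kernel's macro:

/* Sketch only: trace-style before/after summary of a fast page fault fix. */
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT  (1ULL << 0)
#define SPTE_WRITABLE (1ULL << 1)

/* Assumed stand-in for __spte_satisfied() on a write fault. */
static int satisfies_write(uint64_t spte)
{
	return (spte & SPTE_PRESENT) && (spte & SPTE_WRITABLE);
}

static void trace_fast_page_fault(uint64_t old_spte, uint64_t new_spte)
{
	printf("old %#llx new %#llx spurious %d fixed %d\n",
	       (unsigned long long)old_spte, (unsigned long long)new_spte,
	       satisfies_write(old_spte), satisfies_write(new_spte));
}

int main(void)
{
	trace_fast_page_fault(SPTE_PRESENT, SPTE_PRESENT | SPTE_WRITABLE);
	return 0;
}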