pte_access       3050 arch/x86/kvm/mmu.c 		    unsigned pte_access, int level,
pte_access       3058 arch/x86/kvm/mmu.c 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
pte_access       3077 arch/x86/kvm/mmu.c 	if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
pte_access       3079 arch/x86/kvm/mmu.c 		pte_access &= ~ACC_EXEC_MASK;
pte_access       3082 arch/x86/kvm/mmu.c 	if (pte_access & ACC_EXEC_MASK)
pte_access       3087 arch/x86/kvm/mmu.c 	if (pte_access & ACC_USER_MASK)
pte_access       3099 arch/x86/kvm/mmu.c 		pte_access &= ~ACC_WRITE_MASK;
pte_access       3106 arch/x86/kvm/mmu.c 	if (pte_access & ACC_WRITE_MASK) {
pte_access       3133 arch/x86/kvm/mmu.c 			pte_access &= ~ACC_WRITE_MASK;
pte_access       3138 arch/x86/kvm/mmu.c 	if (pte_access & ACC_WRITE_MASK) {
pte_access       3153 arch/x86/kvm/mmu.c static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
pte_access       3188 arch/x86/kvm/mmu.c 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
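
The mmu.c hits are all in set_spte()/mmu_set_spte(): each ACC_* bit of the accumulated pte_access enables one hardware permission in the shadow PTE, and ACC_WRITE_MASK can still be dropped afterwards when the gfn has to stay write-protected (a shadowed guest page table), which is why it is tested again at line 3138 before the page is marked dirty. Below is a minimal standalone sketch of that gating, assuming made-up SPTE_* bit values in place of KVM's runtime-configured shadow_*_mask bits and a need_write_protect flag standing in for mmu_need_write_protect(); it is an illustration, not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ACC_* flags as defined in arch/x86/kvm/mmu.h. */
#define ACC_EXEC_MASK	1u
#define ACC_WRITE_MASK	2u
#define ACC_USER_MASK	4u

/*
 * Placeholder SPTE bits for the demo.  KVM configures the real masks
 * (shadow_x_mask, shadow_nx_mask, shadow_user_mask, ...) at runtime
 * depending on EPT vs. legacy paging.
 */
#define SPTE_PRESENT	(1ull << 0)
#define SPTE_WRITABLE	(1ull << 1)
#define SPTE_USER	(1ull << 2)
#define SPTE_NX		(1ull << 63)

/*
 * Sketch of the pte_access gating visible in the mmu.c hits: each ACC_*
 * bit enables one hardware permission, and write access may still be
 * withdrawn before the final ACC_WRITE_MASK test.
 */
static uint64_t make_spte(unsigned pte_access, bool need_write_protect,
			  bool *dirty)
{
	uint64_t spte = SPTE_PRESENT;

	if (!(pte_access & ACC_EXEC_MASK))	/* exec gate, cf. mmu.c:3082 */
		spte |= SPTE_NX;

	if (pte_access & ACC_USER_MASK)		/* user gate, cf. mmu.c:3087 */
		spte |= SPTE_USER;

	if (pte_access & ACC_WRITE_MASK) {	/* cf. mmu.c:3106 */
		spte |= SPTE_WRITABLE;
		if (need_write_protect) {
			/* cf. mmu.c:3133: keep shadowed page tables read-only */
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~SPTE_WRITABLE;
		}
	}

	/* cf. mmu.c:3138: only a surviving ACC_WRITE_MASK dirties the gfn */
	*dirty = pte_access & ACC_WRITE_MASK;

	return spte;
}

int main(void)
{
	unsigned rwx = ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK;
	bool dirty;
	uint64_t spte;

	spte = make_spte(rwx, false, &dirty);
	printf("unprotected:     spte=%#llx dirty=%d\n",
	       (unsigned long long)spte, dirty);

	spte = make_spte(rwx, true, &dirty);
	printf("write-protected: spte=%#llx dirty=%d\n",
	       (unsigned long long)spte, dirty);
	return 0;
}
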
pte_access        157 arch/x86/kvm/mmu.h 				  unsigned pte_access, unsigned pte_pkey,
pte_access        179 arch/x86/kvm/mmu.h 	bool fault = (mmu->permissions[index] >> pte_access) & 1;
pte_access        196 arch/x86/kvm/mmu.h 			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
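
The mmu.h hits are all in permission_fault(): pte_access is a 3-bit combination of ACC_EXEC_MASK|ACC_WRITE_MASK|ACC_USER_MASK (values 0..7), mmu->permissions[] holds one precomputed byte per page-fault error-code combination, and bit N of that byte says "an access of this type faults when pte_access == N", so the check at mmu.h:179 is a single shift-and-mask. The hit at mmu.h:196 is the protection-key path, where ACC_USER_MASK is folded into the pkru_mask index. The standalone sketch below builds one such byte from only the plain W/U/X requirements; the real update_permission_bitmask() additionally encodes SMEP, SMAP, CR0.WP and NX state, and protection keys are ignored here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ACC_* flags from arch/x86/kvm/mmu.h. */
#define ACC_EXEC_MASK		1u
#define ACC_WRITE_MASK		2u
#define ACC_USER_MASK		4u

/* Page-fault error-code bits relevant here. */
#define PFERR_WRITE_MASK	(1u << 1)
#define PFERR_USER_MASK		(1u << 2)
#define PFERR_FETCH_MASK	(1u << 4)

/*
 * Build one permissions byte for a given fault type: bit N is set when
 * an access of this type faults against a translation whose
 * pte_access == N.  Simplified: only the obvious W/U/X checks.
 */
static uint8_t build_permissions(unsigned pfec)
{
	uint8_t byte = 0;

	for (unsigned pte_access = 0; pte_access < 8; pte_access++) {
		bool fault = false;

		if ((pfec & PFERR_WRITE_MASK) && !(pte_access & ACC_WRITE_MASK))
			fault = true;
		if ((pfec & PFERR_USER_MASK) && !(pte_access & ACC_USER_MASK))
			fault = true;
		if ((pfec & PFERR_FETCH_MASK) && !(pte_access & ACC_EXEC_MASK))
			fault = true;

		byte |= (uint8_t)(fault << pte_access);
	}
	return byte;
}

int main(void)
{
	/* A user-mode write access. */
	uint8_t permissions = build_permissions(PFERR_USER_MASK | PFERR_WRITE_MASK);

	unsigned ro_user = ACC_EXEC_MASK | ACC_USER_MASK;
	unsigned rw_user = ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK;

	/* The per-fault check is then the shift-and-mask seen at mmu.h:179. */
	printf("write to read-only user mapping: fault=%d\n",
	       (permissions >> ro_user) & 1);		/* 1 */
	printf("write to writable user mapping:  fault=%d\n",
	       (permissions >> rw_user) & 1);		/* 0 */
	return 0;
}
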
pte_access         94 arch/x86/kvm/paging_tmpl.h 	unsigned pte_access;
pte_access        304 arch/x86/kvm/paging_tmpl.h 	u64 pt_access, pte_access;
pte_access        344 arch/x86/kvm/paging_tmpl.h 	pte_access = ~0;
pte_access        351 arch/x86/kvm/paging_tmpl.h 		pt_access = pte_access;
pte_access        398 arch/x86/kvm/paging_tmpl.h 		pte_access = pt_access & (pte ^ walk_nx_mask);
pte_access        412 arch/x86/kvm/paging_tmpl.h 	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
pte_access        416 arch/x86/kvm/paging_tmpl.h 	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
pte_access        417 arch/x86/kvm/paging_tmpl.h 	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
pte_access        434 arch/x86/kvm/paging_tmpl.h 		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
pte_access        453 arch/x86/kvm/paging_tmpl.h 		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
pte_access        488 arch/x86/kvm/paging_tmpl.h 		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
pte_access        519 arch/x86/kvm/paging_tmpl.h 	unsigned pte_access;
pte_access        529 arch/x86/kvm/paging_tmpl.h 	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
pte_access        530 arch/x86/kvm/paging_tmpl.h 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
pte_access        532 arch/x86/kvm/paging_tmpl.h 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
pte_access        540 arch/x86/kvm/paging_tmpl.h 	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
pte_access        626 arch/x86/kvm/paging_tmpl.h 	direct_access = gw->pte_access;
pte_access        704 arch/x86/kvm/paging_tmpl.h 	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
pte_access        740 arch/x86/kvm/paging_tmpl.h 	if (!(walker->pte_access & ACC_WRITE_MASK ||
pte_access        837 arch/x86/kvm/paging_tmpl.h 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
pte_access        844 arch/x86/kvm/paging_tmpl.h 	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
pte_access        847 arch/x86/kvm/paging_tmpl.h 		walker.pte_access |= ACC_WRITE_MASK;
pte_access        848 arch/x86/kvm/paging_tmpl.h 		walker.pte_access &= ~ACC_USER_MASK;
pte_access        857 arch/x86/kvm/paging_tmpl.h 			walker.pte_access &= ~ACC_EXEC_MASK;
pte_access       1020 arch/x86/kvm/paging_tmpl.h 		unsigned pte_access;
pte_access       1046 arch/x86/kvm/paging_tmpl.h 		pte_access = sp->role.access;
pte_access       1047 arch/x86/kvm/paging_tmpl.h 		pte_access &= FNAME(gpte_access)(gpte);
pte_access       1048 arch/x86/kvm/paging_tmpl.h 		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
pte_access       1050 arch/x86/kvm/paging_tmpl.h 		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
pte_access       1070 arch/x86/kvm/paging_tmpl.h 					 pte_access, PT_PAGE_TABLE_LEVEL,
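
The paging_tmpl.h hits follow pte_access through the shadow-paging code: walk_addr_generic() accumulates the guest's permissions level by level, the page-fault and fetch paths feed walker->pte_access into mmu_set_spte(), and the prefetch and sync_page paths recompute it from a single gpte via sp->role.access & FNAME(gpte_access)(gpte). The interesting part is the accumulation: pte_access starts as ~0, each level's bits are ANDed in, and NX is handled by XOR-ing every gpte with walk_nx_mask so that "may execute" composes with AND like W and U; the final XOR undoes this and FNAME(gpte_access) folds NX into the present bit, which equals ACC_EXEC_MASK. Below is a standalone replay of that for a hypothetical two-level walk; gpte_access() is modelled on the non-EPT variant and the example gptes are made up.

#include <stdint.h>
#include <stdio.h>

/* x86 page-table entry bits (non-EPT), as used by the walker. */
#define PT_PRESENT_MASK		(1ull << 0)
#define PT_WRITABLE_MASK	(1ull << 1)
#define PT_USER_MASK		(1ull << 2)
#define PT64_NX_SHIFT		63
#define PT64_NX_MASK		(1ull << PT64_NX_SHIFT)

/* ACC_* flags from arch/x86/kvm/mmu.h; ACC_EXEC_MASK == PT_PRESENT_MASK. */
#define ACC_EXEC_MASK		1u
#define ACC_WRITE_MASK		PT_WRITABLE_MASK
#define ACC_USER_MASK		PT_USER_MASK

/*
 * Modelled on the non-EPT FNAME(gpte_access): keep W/U/P and fold the
 * (cumulative) NX bit into the present bit to form ACC_EXEC_MASK.
 */
static unsigned gpte_access(uint64_t gpte)
{
	unsigned access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);

	access ^= (gpte >> PT64_NX_SHIFT);
	return access;
}

int main(void)
{
	/* Hypothetical two-level walk: writable user PDE, read-only NX user PTE. */
	uint64_t gptes[] = {
		PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK,
		PT_PRESENT_MASK | PT_USER_MASK | PT64_NX_MASK,
	};
	uint64_t walk_nx_mask = PT64_NX_MASK;
	uint64_t pte_access = ~0ull;			/* cf. paging_tmpl.h:344 */
	unsigned acc;

	for (int i = 0; i < 2; i++)
		pte_access &= gptes[i] ^ walk_nx_mask;	/* cf. paging_tmpl.h:398 */

	acc = gpte_access(pte_access ^ walk_nx_mask);	/* cf. paging_tmpl.h:416 */

	printf("exec=%d write=%d user=%d\n",
	       !!(acc & ACC_EXEC_MASK),
	       !!(acc & ACC_WRITE_MASK),
	       !!(acc & ACC_USER_MASK));		/* exec=0 write=0 user=1 */
	return 0;
}

With a writable user PDE and a read-only, NX user PTE the result is user-only access: every permission must be granted at every level, which is exactly what the AND at paging_tmpl.h:398 implements.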