fault_ipa          26 arch/arm/include/asm/kvm_mmio.h 		 phys_addr_t fault_ipa);
fault_ipa          25 arch/arm64/include/asm/kvm_mmio.h 		 phys_addr_t fault_ipa);
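
(The two header hits above appear to be the trailing parameter of the io_mem_abort() declaration, which 32-bit arm and arm64 each carry in their own kvm_mmio.h; the function itself is defined in virt/kvm/arm/mmio.c, whose hits follow below.)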
fault_ipa          39 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	phys_addr_t fault_ipa;
fault_ipa          44 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
fault_ipa          45 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
fault_ipa          48 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	if (fault_ipa <  vgic->vgic_cpu_base ||
fault_ipa          49 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
fault_ipa          59 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	if (fault_ipa & 3) {
fault_ipa          66 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	addr += fault_ipa - vgic->vgic_cpu_base;
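
The vgic-v2-cpuif-proxy.c hits come from __vgic_v2_perform_cpuif_access(), the EL2 proxy that emulates guest GICV accesses. HPFAR_EL2 reports the faulting IPA only at page granularity, so bits [11:0] are recovered from HFAR before the address is range- and alignment-checked against the virtual GIC CPU interface window. Below is a minimal user-space sketch of that reconstruction and check; phys_addr_t here, gicv_fault_in_range() and the KVM_VGIC_V2_CPU_SIZE value are stand-ins, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    #define KVM_VGIC_V2_CPU_SIZE 0x2000  /* stand-in value for the kernel constant */

    static bool gicv_fault_in_range(phys_addr_t ipa_page, uint64_t hfar,
                                    phys_addr_t gicv_base, phys_addr_t *offset)
    {
        /* HPFAR_EL2 gives the IPA page; the low 12 bits come from HFAR/FAR_EL2. */
        phys_addr_t fault_ipa = ipa_page | (hfar & 0xfffULL); /* GENMASK(11, 0) */

        if (fault_ipa < gicv_base ||
            fault_ipa >= gicv_base + KVM_VGIC_V2_CPU_SIZE)
            return false;   /* outside the virtual GIC CPU interface window */

        if (fault_ipa & 3)
            return false;   /* not 32-bit aligned: the proxy refuses to emulate it */

        *offset = fault_ipa - gicv_base; /* offset into the GICV region */
        return true;
    }

    int main(void)
    {
        phys_addr_t off;
        if (gicv_fault_in_range(0x08010000, 0x10, 0x08010000, &off))
            printf("GICV access at offset 0x%llx\n", (unsigned long long)off);
        return 0;
    }

In the kernel, the resulting offset is then applied to the hypervisor mapping of the real GICV region (the `addr += fault_ipa - vgic->vgic_cpu_base` hit above) so the access can be replayed at EL2 without a full exit.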
fault_ipa         157 virt/kvm/arm/mmio.c 		 phys_addr_t fault_ipa)
fault_ipa         186 virt/kvm/arm/mmio.c 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
fault_ipa         189 virt/kvm/arm/mmio.c 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
fault_ipa         193 virt/kvm/arm/mmio.c 			       fault_ipa, NULL);
fault_ipa         195 virt/kvm/arm/mmio.c 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
fault_ipa         201 virt/kvm/arm/mmio.c 	run->mmio.phys_addr	= fault_ipa;
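
In virt/kvm/arm/mmio.c, io_mem_abort() uses fault_ipa as the bus address of the trapped access: it first offers the access to in-kernel devices via kvm_io_bus_write()/kvm_io_bus_read() on KVM_MMIO_BUS, and only when nothing claims the address does it fill run->mmio (with phys_addr = fault_ipa) and exit to userspace. A hedged sketch of that two-level dispatch, with struct mmio_exit, bus_write() and bus_read() as illustrative stand-ins rather than kernel symbols:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for the run->mmio block handed to userspace. */
    struct mmio_exit {
        uint64_t phys_addr;
        uint32_t len;
        bool     is_write;
        uint8_t  data[8];
    };

    /* Stubs modelling kvm_io_bus_write()/kvm_io_bus_read(): return 0 when an
     * in-kernel device claims the address, nonzero otherwise. */
    static int bus_write(uint64_t addr, int len, const void *val)
    {
        (void)addr; (void)len; (void)val;
        return -1;  /* nothing claims the address in this model */
    }

    static int bus_read(uint64_t addr, int len, void *val)
    {
        (void)addr; (void)len; (void)val;
        return -1;
    }

    /* Model of io_mem_abort(): try in-kernel emulation, else defer to userspace. */
    static int handle_mmio(uint64_t fault_ipa, int len, bool is_write,
                           uint8_t *data, struct mmio_exit *out)
    {
        int ret = is_write ? bus_write(fault_ipa, len, data)
                           : bus_read(fault_ipa, len, data);
        if (!ret)
            return 1;                 /* handled in kernel: resume the guest */

        out->phys_addr = fault_ipa;   /* run->mmio.phys_addr = fault_ipa */
        out->len = (uint32_t)len;
        out->is_write = is_write;
        if (is_write)
            memcpy(out->data, data, (size_t)len);
        return 0;                     /* caller exits to userspace */
    }

    int main(void)
    {
        uint8_t data[4] = { 0xaa, 0xbb, 0xcc, 0xdd };
        struct mmio_exit out;
        if (!handle_mmio(0x09000000, 4, true, data, &out))
            printf("MMIO write to 0x%llx left for userspace\n",
                   (unsigned long long)out.phys_addr);
        return 0;
    }

Returning 0 in this model corresponds to the kernel exiting to userspace with KVM_EXIT_MMIO; the instruction is completed on the next vcpu re-entry.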
fault_ipa        1669 virt/kvm/arm/mmu.c static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_ipa        1677 virt/kvm/arm/mmu.c 	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
fault_ipa        1721 virt/kvm/arm/mmu.c 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
fault_ipa        1786 virt/kvm/arm/mmu.c 		    transparent_hugepage_adjust(&pfn, &fault_ipa))
fault_ipa        1808 virt/kvm/arm/mmu.c 		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
fault_ipa        1820 virt/kvm/arm/mmu.c 		ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
fault_ipa        1832 virt/kvm/arm/mmu.c 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
fault_ipa        1844 virt/kvm/arm/mmu.c 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
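
user_mem_abort() resolves stage-2 faults on memslot-backed memory: it derives the gfn from fault_ipa, may widen the mapping to a PMD or PUD block when the backing VMA is a hugepage (aligning fault_ipa down via huge_page_mask() or transparent_hugepage_adjust()), then installs the entry with stage2_set_pud_huge(), stage2_set_pmd_huge() or stage2_set_pte(). The sketch below models only the size-selection and alignment step; pick_stage2_size(), the shift constants and backing_size are assumptions for illustration, and the real function also weighs permissions, execute state and memcache availability.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21            /* 4K granule, illustrative */
    #define PUD_SHIFT  30
    #define PMD_SIZE   (1ULL << PMD_SHIFT)
    #define PUD_SIZE   (1ULL << PUD_SHIFT)

    typedef uint64_t phys_addr_t;

    /* Rough model of user_mem_abort()'s size selection: map at the largest
     * block size the backing memory supports, aligning fault_ipa down to it. */
    static uint64_t pick_stage2_size(phys_addr_t *fault_ipa, uint64_t backing_size)
    {
        if (backing_size >= PUD_SIZE) {
            *fault_ipa &= ~(PUD_SIZE - 1);   /* 1G hugetlbfs page -> PUD entry */
            return PUD_SIZE;
        }
        if (backing_size >= PMD_SIZE) {
            *fault_ipa &= ~(PMD_SIZE - 1);   /* 2M hugetlb/THP -> PMD entry */
            return PMD_SIZE;
        }
        return 1ULL << PAGE_SHIFT;           /* ordinary 4K PTE */
    }

    int main(void)
    {
        phys_addr_t ipa = 0x40245678;
        uint64_t size = pick_stage2_size(&ipa, PMD_SIZE);
        printf("map %llu bytes at IPA 0x%llx (gfn 0x%llx)\n",
               (unsigned long long)size, (unsigned long long)ipa,
               (unsigned long long)(ipa >> PAGE_SHIFT));
        return 0;
    }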
fault_ipa        1861 virt/kvm/arm/mmu.c static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
fault_ipa        1869 virt/kvm/arm/mmu.c 	trace_kvm_access_fault(fault_ipa);
fault_ipa        1873 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
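
The handle_access_fault() hits are the cheap path for stage-2 access-flag faults: the handler traces the fault, walks to the leaf PUD/PMD/PTE covering fault_ipa with stage2_get_leaf_entry(), and marks the entry (and the underlying pfn) accessed so the same fault does not recur; no new mapping is created and no exit to userspace is needed.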
fault_ipa        1911 virt/kvm/arm/mmu.c 	phys_addr_t fault_ipa;
fault_ipa        1920 virt/kvm/arm/mmu.c 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
fault_ipa        1929 virt/kvm/arm/mmu.c 		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
fault_ipa        1939 virt/kvm/arm/mmu.c 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
fault_ipa        1953 virt/kvm/arm/mmu.c 	gfn = fault_ipa >> PAGE_SHIFT;
fault_ipa        1986 virt/kvm/arm/mmu.c 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
fault_ipa        1987 virt/kvm/arm/mmu.c 		ret = io_mem_abort(vcpu, run, fault_ipa);
fault_ipa        1992 virt/kvm/arm/mmu.c 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
fault_ipa        1995 virt/kvm/arm/mmu.c 		handle_access_fault(vcpu, fault_ipa);
fault_ipa        2000 virt/kvm/arm/mmu.c 	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
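
kvm_handle_guest_abort() ties the hits above together. It reads the page-granular IPA from HPFAR, screens out synchronous external aborts via kvm_handle_guest_sea(), and then routes: a gfn with no memslot is treated as MMIO, with bits [11:0] merged in from HFAR before calling io_mem_abort(); an access-flag fault (FSC_ACCESS in the kernel) goes to handle_access_fault(); everything else goes to user_mem_abort(). A condensed, compilable model of that routing, where route_guest_abort(), the *_model() stubs and FSC_ACCESS_MODEL are assumptions standing in for the kernel's symbols:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;
    typedef uint64_t gfn_t;

    #define PAGE_SHIFT 12
    #define FSC_ACCESS_MODEL 1   /* illustrative, not ESR_ELx's real encoding */

    static bool gfn_has_memslot(gfn_t gfn)
    {
        (void)gfn;
        return false;  /* model an unbacked (device) gfn to demo the MMIO path */
    }

    static int io_mem_abort_model(phys_addr_t ipa)
    {
        printf("MMIO abort at 0x%llx\n", (unsigned long long)ipa);
        return 0;
    }

    static void access_fault_model(phys_addr_t ipa)
    {
        printf("mark IPA 0x%llx accessed\n", (unsigned long long)ipa);
    }

    static int user_mem_abort_model(phys_addr_t ipa)
    {
        printf("map IPA 0x%llx\n", (unsigned long long)ipa);
        return 1;
    }

    /* Condensed model of kvm_handle_guest_abort()'s routing decision. */
    static int route_guest_abort(phys_addr_t ipa_page, uint64_t hfar, int fsc)
    {
        gfn_t gfn = ipa_page >> PAGE_SHIFT;

        if (!gfn_has_memslot(gfn)) {
            /* HPFAR is page-granular, so recover bits [11:0] from HFAR
             * before emulating the access. */
            phys_addr_t fault_ipa = ipa_page | (hfar & ((1 << 12) - 1));
            return io_mem_abort_model(fault_ipa);
        }

        if (fsc == FSC_ACCESS_MODEL) {
            access_fault_model(ipa_page); /* access flag: just mark accessed */
            return 1;                     /* resume the guest */
        }

        return user_mem_abort_model(ipa_page); /* install/adjust stage-2 entry */
    }

    int main(void)
    {
        route_guest_abort(0x09000000, 0x4, 0);
        return 0;
    }

The VM_BUG_ON(fault_ipa >= kvm_phys_size(...)) hit above is the sanity check on the memslot-backed path: by that point the IPA must lie inside the guest's physical address space.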