eaddr 503 arch/arm/mm/alignment.c unsigned long eaddr, newaddr;
eaddr 517 arch/arm/mm/alignment.c newaddr = eaddr = regs->uregs[rn];
eaddr 523 arch/arm/mm/alignment.c eaddr = newaddr;
eaddr 526 arch/arm/mm/alignment.c eaddr += 4;
eaddr 540 arch/arm/mm/alignment.c if (addr != eaddr) {
eaddr 543 arch/arm/mm/alignment.c instruction_pointer(regs), instr, addr, eaddr);
eaddr 555 arch/arm/mm/alignment.c get32t_unaligned_check(val, eaddr);
eaddr 558 arch/arm/mm/alignment.c put32t_unaligned_check(regs->uregs[rd], eaddr);
eaddr 559 arch/arm/mm/alignment.c eaddr += 4;
eaddr 568 arch/arm/mm/alignment.c get32_unaligned_check(val, eaddr);
eaddr 571 arch/arm/mm/alignment.c put32_unaligned_check(regs->uregs[rd], eaddr);
eaddr 572 arch/arm/mm/alignment.c eaddr += 4;
eaddr 155 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
eaddr 156 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
eaddr 161 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
eaddr 181 arch/powerpc/include/asm/kvm_book3s.h gva_t eaddr, void *to, void *from,
eaddr 183 arch/powerpc/include/asm/kvm_book3s.h extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 185 arch/powerpc/include/asm/kvm_book3s.h extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 187 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 190 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 193 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 228 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
eaddr 379 arch/powerpc/include/asm/kvm_host.h ulong eaddr;
eaddr 396 arch/powerpc/include/asm/kvm_host.h int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
eaddr 402 arch/powerpc/include/asm/kvm_host.h int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 407 arch/powerpc/include/asm/kvm_host.h u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
eaddr 475 arch/powerpc/include/asm/kvm_host.h unsigned long eaddr;
eaddr 89 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
eaddr 91 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
eaddr 112 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
eaddr 113 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
eaddr 115 arch/powerpc/include/asm/kvm_ppc.h gva_t eaddr);
eaddr 118 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
eaddr 320 arch/powerpc/include/asm/kvm_ppc.h int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
eaddr 322 arch/powerpc/include/asm/kvm_ppc.h int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
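The arch/arm/mm/alignment.c entries at the top of this listing step eaddr through a misaligned load/store-multiple one word at a time, then cross-check the final address. A minimal, self-contained sketch of that walk (standalone C with illustrative values, not the kernel code):

    /* Model of the LDM/STM address walk: start at the base register,
     * advance 4 bytes per transferred register, then compare the end
     * address against independent bookkeeping, as the fixup handler's
     * "addr != eaddr" check does. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int reglist_count(uint16_t reglist)
    {
        unsigned int n = 0;
        for (; reglist; reglist >>= 1)
            n += reglist & 1;
        return n;
    }

    int main(void)
    {
        uint32_t base = 0x1000;     /* value of the base register rn */
        uint16_t reglist = 0x00f0;  /* transfer r4-r7 */
        uint32_t eaddr = base;
        unsigned int n = reglist_count(reglist);

        for (unsigned int i = 0; i < n; i++) {
            printf("word %u at 0x%08x\n", i, (unsigned)eaddr);
            eaddr += 4;             /* one word per register */
        }

        uint32_t expected = base + 4 * n; /* independent recomputation */
        if (eaddr != expected)
            printf("mismatch: 0x%08x vs 0x%08x\n",
                   (unsigned)eaddr, (unsigned)expected);
        return 0;
    }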
eaddr 452 arch/powerpc/kvm/book3s.c int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
eaddr 461 arch/powerpc/kvm/book3s.c r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
eaddr 463 arch/powerpc/kvm/book3s.c pte->eaddr = eaddr;
eaddr 464 arch/powerpc/kvm/book3s.c pte->raddr = eaddr & KVM_PAM;
eaddr 465 arch/powerpc/kvm/book3s.c pte->vpage = VSID_REAL | eaddr >> 12;
eaddr 474 arch/powerpc/kvm/book3s.c ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
eaddr 69 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 75 arch/powerpc/kvm/book3s_32_mmu.c static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
eaddr 77 arch/powerpc/kvm/book3s_32_mmu.c return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
eaddr 80 arch/powerpc/kvm/book3s_32_mmu.c static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 86 arch/powerpc/kvm/book3s_32_mmu.c if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
eaddr 89 arch/powerpc/kvm/book3s_32_mmu.c kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
eaddr 90 arch/powerpc/kvm/book3s_32_mmu.c return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
eaddr 99 arch/powerpc/kvm/book3s_32_mmu.c u32 sre, gva_t eaddr,
eaddr 106 arch/powerpc/kvm/book3s_32_mmu.c page = (eaddr & 0x0FFFFFFF) >> 12;
eaddr 117 arch/powerpc/kvm/book3s_32_mmu.c kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,
eaddr 126 arch/powerpc/kvm/book3s_32_mmu.c static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
eaddr 128 arch/powerpc/kvm/book3s_32_mmu.c return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) |
eaddr 132 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 157 arch/powerpc/kvm/book3s_32_mmu.c data ? 'd' : 'i', i, eaddr, bat->bepi,
eaddr 160 arch/powerpc/kvm/book3s_32_mmu.c if ((eaddr & bat->bepi_mask) == bat->bepi) {
eaddr 163 arch/powerpc/kvm/book3s_32_mmu.c eaddr >> SID_SHIFT, &vsid);
eaddr 165 arch/powerpc/kvm/book3s_32_mmu.c pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
eaddr 167 arch/powerpc/kvm/book3s_32_mmu.c pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
eaddr 187 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 199 arch/powerpc/kvm/book3s_32_mmu.c sre = find_sr(vcpu, eaddr);
eaddr 201 arch/powerpc/kvm/book3s_32_mmu.c dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
eaddr 204 arch/powerpc/kvm/book3s_32_mmu.c pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
eaddr 206 arch/powerpc/kvm/book3s_32_mmu.c ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
eaddr 212 arch/powerpc/kvm/book3s_32_mmu.c ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary);
eaddr 226 arch/powerpc/kvm/book3s_32_mmu.c pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF);
eaddr 295 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 302 arch/powerpc/kvm/book3s_32_mmu.c pte->eaddr = eaddr;
eaddr 307 arch/powerpc/kvm/book3s_32_mmu.c unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
eaddr 309 arch/powerpc/kvm/book3s_32_mmu.c pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
eaddr 319 arch/powerpc/kvm/book3s_32_mmu.c r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
eaddr 321 arch/powerpc/kvm/book3s_32_mmu.c r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
eaddr 324 arch/powerpc/kvm/book3s_32_mmu.c r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
eaddr 59 arch/powerpc/kvm/book3s_32_mmu_host.c asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
eaddr 106 arch/powerpc/kvm/book3s_32_mmu_host.c static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
eaddr 112 arch/powerpc/kvm/book3s_32_mmu_host.c page = (eaddr & ~ESID_MASK) >> 12;
eaddr 138 arch/powerpc/kvm/book3s_32_mmu_host.c u32 eaddr = orig_pte->eaddr;
eaddr 158 arch/powerpc/kvm/book3s_32_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
eaddr 161 arch/powerpc/kvm/book3s_32_mmu_host.c kvmppc_mmu_map_segment(vcpu, eaddr);
eaddr 168 arch/powerpc/kvm/book3s_32_mmu_host.c ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
eaddr 176 arch/powerpc/kvm/book3s_32_mmu_host.c pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
eaddr 194 arch/powerpc/kvm/book3s_32_mmu_host.c pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
eaddr 243 arch/powerpc/kvm/book3s_32_mmu_host.c orig_pte->eaddr, (ulong)pteg, vpn,
eaddr 302 arch/powerpc/kvm/book3s_32_mmu_host.c int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
eaddr 304 arch/powerpc/kvm/book3s_32_mmu_host.c u32 esid = eaddr >> SID_SHIFT;
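The book3s_32_mmu.c entries above show the two 32-bit Book3S lookups: the top nibble of the effective address picks one of 16 segment registers ((eaddr >> 28) & 0xf), and a BAT entry matches when the masked high bits equal bepi, with the real address formed from brpn plus the block offset. A simplified standalone model (field names illustrative, not the kernel structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bat {
        uint32_t bepi;      /* block effective page index */
        uint32_t bepi_mask; /* high bits that must match */
        uint32_t brpn;      /* block real page number */
    };

    /* 16 segments of 256MB each cover the 32-bit effective space. */
    static unsigned int sr_index(uint32_t eaddr)
    {
        return (eaddr >> 28) & 0xf;
    }

    static bool bat_translate(const struct bat *bat, uint32_t eaddr,
                              uint32_t *raddr)
    {
        if ((eaddr & bat->bepi_mask) != bat->bepi)
            return false;
        /* Block base plus the offset within the block. */
        *raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
        return true;
    }

    int main(void)
    {
        struct bat bat = {
            .bepi = 0xc0000000, .bepi_mask = 0xf0000000,
            .brpn = 0x10000000,
        };
        uint32_t eaddr = 0xc1234567, raddr;

        printf("segment register %u\n", sr_index(eaddr));
        if (bat_translate(&bat, eaddr, &raddr))
            printf("ea 0x%08x -> ra 0x%08x\n",
                   (unsigned)eaddr, (unsigned)raddr);
        return 0;
    }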
eaddr 43 arch/powerpc/kvm/book3s_64_mmu.c gva_t eaddr)
eaddr 46 arch/powerpc/kvm/book3s_64_mmu.c u64 esid = GET_ESID(eaddr);
eaddr 47 arch/powerpc/kvm/book3s_64_mmu.c u64 esid_1t = GET_ESID_1T(eaddr);
eaddr 63 arch/powerpc/kvm/book3s_64_mmu.c eaddr, esid, esid_1t);
eaddr 87 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
eaddr 89 arch/powerpc/kvm/book3s_64_mmu.c eaddr &= kvmppc_slb_offset_mask(slb);
eaddr 91 arch/powerpc/kvm/book3s_64_mmu.c return (eaddr >> VPN_SHIFT) |
eaddr 95 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 100 arch/powerpc/kvm/book3s_64_mmu.c slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
eaddr 104 arch/powerpc/kvm/book3s_64_mmu.c return kvmppc_slb_calc_vpn(slb, eaddr);
eaddr 123 arch/powerpc/kvm/book3s_64_mmu.c static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
eaddr 127 arch/powerpc/kvm/book3s_64_mmu.c return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
eaddr 131 arch/powerpc/kvm/book3s_64_mmu.c struct kvmppc_slb *slbe, gva_t eaddr,
eaddr 142 arch/powerpc/kvm/book3s_64_mmu.c vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
eaddr 169 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
eaddr 174 arch/powerpc/kvm/book3s_64_mmu.c avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
eaddr 205 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 225 arch/powerpc/kvm/book3s_64_mmu.c unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
eaddr 227 arch/powerpc/kvm/book3s_64_mmu.c gpte->eaddr = eaddr;
eaddr 228 arch/powerpc/kvm/book3s_64_mmu.c gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
eaddr 240 arch/powerpc/kvm/book3s_64_mmu.c slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
eaddr 244 arch/powerpc/kvm/book3s_64_mmu.c avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
eaddr 261 arch/powerpc/kvm/book3s_64_mmu.c ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
eaddr 308 arch/powerpc/kvm/book3s_64_mmu.c gpte->eaddr = eaddr;
eaddr 309 arch/powerpc/kvm/book3s_64_mmu.c gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
eaddr 312 arch/powerpc/kvm/book3s_64_mmu.c gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
eaddr 339 arch/powerpc/kvm/book3s_64_mmu.c eaddr, avpn, gpte->vpage, gpte->raddr);
eaddr 427 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 430 arch/powerpc/kvm/book3s_64_mmu.c struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
eaddr 106 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
eaddr 109 arch/powerpc/kvm/book3s_64_mmu_host.c ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
eaddr 115 arch/powerpc/kvm/book3s_64_mmu_host.c vsid, orig_pte->eaddr);
eaddr 121 arch/powerpc/kvm/book3s_64_mmu_host.c vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
eaddr 217 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
eaddr 310 arch/powerpc/kvm/book3s_64_mmu_host.c int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
eaddr 313 arch/powerpc/kvm/book3s_64_mmu_host.c u64 esid = eaddr >> SID_SHIFT;
eaddr 314 arch/powerpc/kvm/book3s_64_mmu_host.c u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
eaddr 321 arch/powerpc/kvm/book3s_64_mmu_host.c slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
eaddr 311 arch/powerpc/kvm/book3s_64_mmu_hv.c gva_t eaddr)
eaddr 325 arch/powerpc/kvm/book3s_64_mmu_hv.c if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
eaddr 340 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 353 arch/powerpc/kvm/book3s_64_mmu_hv.c return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
eaddr 357 arch/powerpc/kvm/book3s_64_mmu_hv.c slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
eaddr 368 arch/powerpc/kvm/book3s_64_mmu_hv.c index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
eaddr 383 arch/powerpc/kvm/book3s_64_mmu_hv.c gpte->eaddr = eaddr;
eaddr 384 arch/powerpc/kvm/book3s_64_mmu_hv.c gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
eaddr 406 arch/powerpc/kvm/book3s_64_mmu_hv.c gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
eaddr 31 arch/powerpc/kvm/book3s_64_mmu_radix.c gva_t eaddr, void *to, void *from,
eaddr 40 arch/powerpc/kvm/book3s_64_mmu_radix.c return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
eaddr 47 arch/powerpc/kvm/book3s_64_mmu_radix.c from = (void *) (eaddr | (quadrant << 62));
eaddr 49 arch/powerpc/kvm/book3s_64_mmu_radix.c to = (void *) (eaddr | (quadrant << 62));
eaddr 84 arch/powerpc/kvm/book3s_64_mmu_radix.c static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 91 arch/powerpc/kvm/book3s_64_mmu_radix.c if (eaddr & (0x3FFUL << 52))
eaddr 99 arch/powerpc/kvm/book3s_64_mmu_radix.c if (((eaddr >> 62) & 0x3) == 0x3)
eaddr 102 arch/powerpc/kvm/book3s_64_mmu_radix.c eaddr &= ~(0xFFFUL << 52);
eaddr 104 arch/powerpc/kvm/book3s_64_mmu_radix.c return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
eaddr 107 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
eaddr 112 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
eaddr 120 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
eaddr 123 arch/powerpc/kvm/book3s_64_mmu_radix.c return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
eaddr 127 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 157 arch/powerpc/kvm/book3s_64_mmu_radix.c index = (eaddr >> offset) & ((1UL << bits) - 1);
eaddr 189 arch/powerpc/kvm/book3s_64_mmu_radix.c gpa |= eaddr & ((1ul << offset) - 1);
eaddr 196 arch/powerpc/kvm/book3s_64_mmu_radix.c gpte->eaddr = eaddr;
eaddr 220 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 246 arch/powerpc/kvm/book3s_64_mmu_radix.c return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
eaddr 249 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
eaddr 257 arch/powerpc/kvm/book3s_64_mmu_radix.c switch (eaddr >> 62) {
eaddr 268 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
eaddr 5362 arch/powerpc/kvm/book3s_hv.c static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
eaddr 5368 arch/powerpc/kvm/book3s_hv.c rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
eaddr 5381 arch/powerpc/kvm/book3s_hv.c static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
eaddr 5387 arch/powerpc/kvm/book3s_hv.c rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
eaddr 478 arch/powerpc/kvm/book3s_hv_nested.c gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
eaddr 489 arch/powerpc/kvm/book3s_hv_nested.c if (eaddr & (0xFFFUL << 52))
eaddr 507 arch/powerpc/kvm/book3s_hv_nested.c eaddr, buf, NULL, n);
eaddr 523 arch/powerpc/kvm/book3s_hv_nested.c eaddr, NULL, buf, n);
eaddr 1081 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long eaddr, unsigned long slb_v, long mmio_update)
eaddr 1091 arch/powerpc/kvm/book3s_hv_rm_mmu.c if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
eaddr 1115 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
eaddr 1144 arch/powerpc/kvm/book3s_hv_rm_mmu.c hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
eaddr 1146 arch/powerpc/kvm/book3s_hv_rm_mmu.c avpn |= (eaddr & somask) >> 16;
eaddr 1303 arch/powerpc/kvm/book3s_hv_rm_mmu.c cache_entry->eaddr = addr;
eaddr 26 arch/powerpc/kvm/book3s_mmu_hpte.c static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
eaddr 28 arch/powerpc/kvm/book3s_mmu_hpte.c return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
eaddr 31 arch/powerpc/kvm/book3s_mmu_hpte.c static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
eaddr 33 arch/powerpc/kvm/book3s_mmu_hpte.c return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
eaddr 66 arch/powerpc/kvm/book3s_mmu_hpte.c index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
eaddr 70 arch/powerpc/kvm/book3s_mmu_hpte.c index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
eaddr 163 arch/powerpc/kvm/book3s_mmu_hpte.c if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
eaddr 183 arch/powerpc/kvm/book3s_mmu_hpte.c if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
eaddr 155 arch/powerpc/kvm/book3s_paired_singles.c static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
eaddr 163 arch/powerpc/kvm/book3s_paired_singles.c kvmppc_set_dar(vcpu, eaddr);
eaddr 668 arch/powerpc/kvm/book3s_pr.c ulong eaddr, int vec)
eaddr 686 arch/powerpc/kvm/book3s_pr.c page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
eaddr 691 arch/powerpc/kvm/book3s_pr.c pte.raddr = eaddr & KVM_PAM;
eaddr 692 arch/powerpc/kvm/book3s_pr.c pte.eaddr = eaddr;
eaddr 693 arch/powerpc/kvm/book3s_pr.c pte.vpage = eaddr >> 12;
eaddr 709 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
eaddr 742 arch/powerpc/kvm/book3s_pr.c kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
eaddr 774 arch/powerpc/kvm/book3s_pr.c vcpu->arch.vaddr_accessed = pte.eaddr;
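In the kvmppc_mmu_walk_radix_tree() entries above, each radix level consumes a few bits of the effective address (index = (eaddr >> offset) & ((1UL << bits) - 1)), and the bits below the final offset become the page offset OR-ed into the guest physical address (gpa |= eaddr & ((1ul << offset) - 1)). A standalone sketch of that arithmetic; the 13/9/9/9 geometry is assumed here for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t eaddr = 0x0000123456789abcULL;
        unsigned int bits[] = { 13, 9, 9, 9 }; /* per level, top down */
        unsigned int offset = 52;              /* top of the translated range */

        for (unsigned int lvl = 0; lvl < 4; lvl++) {
            offset -= bits[lvl];
            uint64_t index = (eaddr >> offset) & ((1ULL << bits[lvl]) - 1);
            printf("level %u: index %llu\n",
                   lvl, (unsigned long long)index);
        }

        /* After the last level, offset is 12: a 4KB page offset that
         * survives translation unchanged. */
        uint64_t gpa_off = eaddr & ((1ULL << offset) - 1);
        printf("page offset 0x%llx\n", (unsigned long long)gpa_off);
        return 0;
    }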
eaddr 1236 arch/powerpc/kvm/booke.c unsigned long eaddr = vcpu->arch.fault_dear;
eaddr 1243 arch/powerpc/kvm/booke.c (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
eaddr 1253 arch/powerpc/kvm/booke.c gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
eaddr 1267 arch/powerpc/kvm/booke.c gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
eaddr 1277 arch/powerpc/kvm/booke.c kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
eaddr 1284 arch/powerpc/kvm/booke.c vcpu->arch.vaddr_accessed = eaddr;
eaddr 1294 arch/powerpc/kvm/booke.c unsigned long eaddr = vcpu->arch.regs.nip;
eaddr 1302 arch/powerpc/kvm/booke.c gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
eaddr 1315 arch/powerpc/kvm/booke.c gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
eaddr 1325 arch/powerpc/kvm/booke.c kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
eaddr 1963 arch/powerpc/kvm/booke.c int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
eaddr 1971 arch/powerpc/kvm/booke.c (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
eaddr 1972 arch/powerpc/kvm/booke.c pte->eaddr = eaddr;
eaddr 1974 arch/powerpc/kvm/booke.c (eaddr & ~PAGE_MASK);
eaddr 1975 arch/powerpc/kvm/booke.c pte->vpage = eaddr >> PAGE_SHIFT;
eaddr 1987 arch/powerpc/kvm/booke.c gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
eaddr 1990 arch/powerpc/kvm/booke.c gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
eaddr 2000 arch/powerpc/kvm/booke.c gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
eaddr 2002 arch/powerpc/kvm/booke.c pte->eaddr = eaddr;
eaddr 2003 arch/powerpc/kvm/booke.c pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
eaddr 2004 arch/powerpc/kvm/booke.c pte->vpage = eaddr >> PAGE_SHIFT;
eaddr 238 arch/powerpc/kvm/e500.c u32 val, eaddr;
eaddr 270 arch/powerpc/kvm/e500.c eaddr = get_tlb_eaddr(gtlbe);
eaddr 275 arch/powerpc/kvm/e500.c asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
eaddr 81 arch/powerpc/kvm/e500_mmu.c gva_t eaddr, int tlbsel, unsigned int pid, int as)
eaddr 88 arch/powerpc/kvm/e500_mmu.c set_base = gtlb0_set_base(vcpu_e500, eaddr);
eaddr 91 arch/powerpc/kvm/e500_mmu.c if (eaddr < vcpu_e500->tlb1_min_eaddr ||
eaddr 92 arch/powerpc/kvm/e500_mmu.c eaddr > vcpu_e500->tlb1_max_eaddr)
eaddr 104 arch/powerpc/kvm/e500_mmu.c if (eaddr < get_tlb_eaddr(tlbe))
eaddr 107 arch/powerpc/kvm/e500_mmu.c if (eaddr > get_tlb_end(tlbe))
eaddr 127 arch/powerpc/kvm/e500_mmu.c gva_t eaddr, int as)
eaddr 143 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
eaddr 155 arch/powerpc/kvm/e500_mmu.c gva_t eaddr;
eaddr 169 arch/powerpc/kvm/e500_mmu.c eaddr = get_tlb_eaddr(tlbe);
eaddr 171 arch/powerpc/kvm/e500_mmu.c min(vcpu_e500->tlb1_min_eaddr, eaddr);
eaddr 173 arch/powerpc/kvm/e500_mmu.c eaddr = get_tlb_end(tlbe);
eaddr 175 arch/powerpc/kvm/e500_mmu.c max(vcpu_e500->tlb1_max_eaddr, eaddr);
eaddr 435 arch/powerpc/kvm/e500_mmu.c u64 eaddr = get_tlb_eaddr(gtlbe);
eaddr 444 arch/powerpc/kvm/e500_mmu.c kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
eaddr 454 arch/powerpc/kvm/e500_mmu.c gva_t eaddr, unsigned int pid, int as)
eaddr 460 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
eaddr 473 arch/powerpc/kvm/e500_mmu.c gva_t eaddr;
eaddr 477 arch/powerpc/kvm/e500_mmu.c eaddr = tr->linear_address;
eaddr 481 arch/powerpc/kvm/e500_mmu.c index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
eaddr 487 arch/powerpc/kvm/e500_mmu.c tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
eaddr 495 arch/powerpc/kvm/e500_mmu.c int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
eaddr 499 arch/powerpc/kvm/e500_mmu.c return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
eaddr 502 arch/powerpc/kvm/e500_mmu.c int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
eaddr 506 arch/powerpc/kvm/e500_mmu.c return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
eaddr 524 arch/powerpc/kvm/e500_mmu.c gva_t eaddr)
eaddr 533 arch/powerpc/kvm/e500_mmu.c return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
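The kvmppc_e500_tlb_index() entries above perform a linear range match: an entry hits when the effective address falls between the entry's start and its inclusive end, which accommodates variable-size TLB1 mappings. A minimal model (entry layout invented for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    struct tlbe {
        uint32_t eaddr; /* first byte covered */
        uint32_t end;   /* last byte covered, inclusive */
    };

    static int tlb_search(const struct tlbe *tlb, int n, uint32_t ea)
    {
        for (int i = 0; i < n; i++) {
            if (ea < tlb[i].eaddr)
                continue;
            if (ea > tlb[i].end)
                continue;
            return i;   /* hit */
        }
        return -1;      /* miss */
    }

    int main(void)
    {
        struct tlbe tlb[] = {
            { 0x00000000, 0x00000fff }, /* 4KB entry */
            { 0x10000000, 0x10ffffff }, /* 16MB entry */
        };
        printf("hit index %d\n", tlb_search(tlb, 2, 0x10001234));
        return 0;
    }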
eaddr 105 arch/powerpc/kvm/e500_mmu_host.c static u32 get_host_mas0(unsigned long eaddr)
eaddr 115 arch/powerpc/kvm/e500_mmu_host.c asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
eaddr 586 arch/powerpc/kvm/e500_mmu_host.c void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
eaddr 606 arch/powerpc/kvm/e500_mmu_host.c &priv->ref, eaddr, &stlbe);
eaddr 613 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
eaddr 631 arch/powerpc/kvm/e500_mmu_host.c hva_t eaddr;
eaddr 708 arch/powerpc/kvm/e500_mmu_host.c eaddr = (unsigned long)kmap_atomic(page);
eaddr 709 arch/powerpc/kvm/e500_mmu_host.c *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
eaddr 710 arch/powerpc/kvm/e500_mmu_host.c kunmap_atomic((u32 *)eaddr);
eaddr 59 arch/powerpc/kvm/e500mc.c gva_t eaddr;
eaddr 68 arch/powerpc/kvm/e500mc.c eaddr = get_tlb_eaddr(gtlbe);
eaddr 75 arch/powerpc/kvm/e500mc.c asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
eaddr 323 arch/powerpc/kvm/powerpc.c int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
eaddr 333 arch/powerpc/kvm/powerpc.c r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
eaddr 339 arch/powerpc/kvm/powerpc.c r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
eaddr 344 arch/powerpc/kvm/powerpc.c *eaddr = pte.raddr;
eaddr 354 arch/powerpc/kvm/powerpc.c magic += pte.eaddr & 0xfff;
eaddr 366 arch/powerpc/kvm/powerpc.c int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
eaddr 376 arch/powerpc/kvm/powerpc.c rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
eaddr 382 arch/powerpc/kvm/powerpc.c rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
eaddr 387 arch/powerpc/kvm/powerpc.c *eaddr = pte.raddr;
eaddr 400 arch/powerpc/kvm/powerpc.c magic += pte.eaddr & 0xfff;
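The kvmppc_ld()/kvmppc_st() entries above suggest a convention worth noting: the effective address is passed by pointer and, once kvmppc_xlate() succeeds, rewritten in place with the translated real address (*eaddr = pte.raddr). A sketch of that pattern with a stub translator standing in for the MMU hook (everything here is illustrative, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    struct pte { uint64_t eaddr, raddr; };

    /* Stand-in for the MMU-specific xlate: offset identity map. */
    static int xlate(uint64_t eaddr, struct pte *pte)
    {
        pte->eaddr = eaddr;
        pte->raddr = eaddr + 0x100000;
        return 0;
    }

    static int load_from(uint64_t *eaddr)
    {
        struct pte pte;
        int r = xlate(*eaddr, &pte);
        if (r < 0)
            return r;
        *eaddr = pte.raddr; /* hand the real address back */
        return 0;
    }

    int main(void)
    {
        uint64_t ea = 0x4000;
        if (load_from(&ea) == 0)
            printf("translated to 0x%llx\n", (unsigned long long)ea);
        return 0;
    }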
eaddr 39 arch/powerpc/kvm/trace_pr.h __field( unsigned long, eaddr )
eaddr 49 arch/powerpc/kvm/trace_pr.h __entry->eaddr = orig_pte->eaddr;
eaddr 57 arch/powerpc/kvm/trace_pr.h __entry->flag_w, __entry->flag_x, __entry->eaddr,
eaddr 70 arch/powerpc/kvm/trace_pr.h __field( ulong, eaddr )
eaddr 79 arch/powerpc/kvm/trace_pr.h __entry->eaddr = pte->pte.eaddr;
eaddr 88 arch/powerpc/kvm/trace_pr.h __entry->host_vpn, __entry->pfn, __entry->eaddr,
eaddr 99 arch/powerpc/kvm/trace_pr.h __field( ulong, eaddr )
eaddr 108 arch/powerpc/kvm/trace_pr.h __entry->eaddr = pte->pte.eaddr;
eaddr 117 arch/powerpc/kvm/trace_pr.h __entry->host_vpn, __entry->pfn, __entry->eaddr,
eaddr 501 arch/powerpc/platforms/pseries/ras.c unsigned long eaddr = 0, paddr = 0;
eaddr 580 arch/powerpc/platforms/pseries/ras.c eaddr = be64_to_cpu(mce_log->effective_address);
eaddr 587 arch/powerpc/platforms/pseries/ras.c pfn = addr_to_pfn(regs, eaddr);
eaddr 608 arch/powerpc/platforms/pseries/ras.c eaddr = be64_to_cpu(mce_log->effective_address);
eaddr 625 arch/powerpc/platforms/pseries/ras.c eaddr = be64_to_cpu(mce_log->effective_address);
eaddr 642 arch/powerpc/platforms/pseries/ras.c eaddr = be64_to_cpu(mce_log->effective_address);
eaddr 698 arch/powerpc/platforms/pseries/ras.c &mce_err, regs->nip, eaddr, paddr);
eaddr 56 arch/sh/include/asm/tlb_64.h void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
eaddr 34 arch/sh/mm/cache-sh5.c sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
eaddr 38 arch/sh/mm/cache-sh5.c sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
eaddr 87 arch/sh/mm/cache-sh5.c static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
eaddr 95 arch/sh/mm/cache-sh5.c addr = eaddr;
eaddr 159 arch/sh/mm/cache-sh5.c unsigned long eaddr;
eaddr 188 arch/sh/mm/cache-sh5.c eaddr = aligned_start;
eaddr 189 arch/sh/mm/cache-sh5.c while (eaddr < vma_end) {
eaddr 190 arch/sh/mm/cache-sh5.c sh64_icache_inv_user_page(vma, eaddr);
eaddr 191 arch/sh/mm/cache-sh5.c eaddr += PAGE_SIZE;
eaddr 244 arch/sh/mm/cache-sh5.c unsigned long long eaddr, eaddr0, eaddr1;
eaddr 268 arch/sh/mm/cache-sh5.c for (eaddr = eaddr0; eaddr < eaddr1;
eaddr 269 arch/sh/mm/cache-sh5.c eaddr += cpu_data->dcache.way_size) {
eaddr 270 arch/sh/mm/cache-sh5.c __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
eaddr 277 arch/sh/mm/cache-sh5.c for (eaddr = eaddr0; eaddr < eaddr1;
eaddr 278 arch/sh/mm/cache-sh5.c eaddr += cpu_data->dcache.way_size) {
eaddr 284 arch/sh/mm/cache-sh5.c __raw_readb((unsigned long)eaddr);
eaddr 326 arch/sh/mm/cache-sh5.c unsigned long eaddr)
eaddr 331 arch/sh/mm/cache-sh5.c magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
eaddr 361 arch/sh/mm/cache-sh5.c unsigned long long eaddr_start, eaddr, eaddr_end;
eaddr 370 arch/sh/mm/cache-sh5.c eaddr = eaddr_start;
eaddr 371 arch/sh/mm/cache-sh5.c eaddr_end = eaddr + PAGE_SIZE;
eaddr 372 arch/sh/mm/cache-sh5.c while (eaddr < eaddr_end) {
eaddr 373 arch/sh/mm/cache-sh5.c __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
eaddr 374 arch/sh/mm/cache-sh5.c eaddr += L1_CACHE_BYTES;
eaddr 552 arch/sh/mm/cache-sh5.c unsigned long eaddr, pfn;
eaddr 555 arch/sh/mm/cache-sh5.c eaddr = data->addr1;
eaddr 561 arch/sh/mm/cache-sh5.c sh64_icache_inv_user_page(vma, eaddr);
eaddr 120 arch/sh/mm/tlb-sh5.c void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
eaddr 125 arch/sh/mm/tlb-sh5.c pteh = neff_sign_extend(eaddr);
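The cache-sh5.c entries above purge a page one cache line at a time, stepping eaddr by L1_CACHE_BYTES and issuing an "ocbp" per line. A portable model of that stride loop, with the cache operation replaced by a counter (line size assumed for illustration):

    #include <stdio.h>

    #define PAGE_SIZE      4096
    #define L1_CACHE_BYTES 32   /* illustrative line size */

    int main(void)
    {
        unsigned long eaddr = 0x10000;
        unsigned long eaddr_end = eaddr + PAGE_SIZE;
        unsigned int lines = 0;

        while (eaddr < eaddr_end) {
            /* kernel: __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr)); */
            lines++;
            eaddr += L1_CACHE_BYTES;
        }
        printf("%u cache lines per page\n", lines); /* 128 here */
        return 0;
    }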
eaddr 290 arch/unicore32/mm/alignment.c unsigned long eaddr, newaddr;
eaddr 301 arch/unicore32/mm/alignment.c newaddr = eaddr = regs->uregs[rn];
eaddr 307 arch/unicore32/mm/alignment.c eaddr = newaddr;
eaddr 310 arch/unicore32/mm/alignment.c eaddr += 4;
eaddr 316 arch/unicore32/mm/alignment.c if (addr != eaddr) {
eaddr 319 arch/unicore32/mm/alignment.c instruction_pointer(regs), instr, addr, eaddr);
eaddr 333 arch/unicore32/mm/alignment.c uregs[rd + reg_correction], eaddr);
eaddr 336 arch/unicore32/mm/alignment.c uregs[rd + reg_correction], eaddr);
eaddr 337 arch/unicore32/mm/alignment.c eaddr += 4;
eaddr 2123 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t eaddr;
eaddr 2131 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr = saddr + size - 1;
eaddr 2132 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (saddr >= eaddr ||
eaddr 2137 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr 2139 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
eaddr 2143 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
eaddr 2153 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->last = eaddr;
eaddr 2187 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t eaddr;
eaddr 2196 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr = saddr + size - 1;
eaddr 2197 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (saddr >= eaddr ||
eaddr 2213 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr 2216 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->last = eaddr;
eaddr 2299 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t eaddr;
eaddr 2301 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr = saddr + size - 1;
eaddr 2303 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr 2319 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
eaddr 2332 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (tmp->last > eaddr) {
eaddr 2333 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c after->start = eaddr + 1;
eaddr 2345 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
eaddr 2355 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (tmp->last > eaddr)
eaddr 2356 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c tmp->last = eaddr;
eaddr 164 drivers/mtd/nand/raw/atmel/pmecc.c u32 eaddr;
eaddr 405 drivers/mtd/nand/raw/atmel/pmecc.c user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
eaddr 793 drivers/mtd/nand/raw/atmel/pmecc.c writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);
eaddr 577 drivers/net/ethernet/amd/ni65.c p->ib.eaddr[i] = daddr[i];
eaddr 91 drivers/net/ethernet/amd/ni65.h unsigned char eaddr[6];
eaddr 2164 drivers/net/ethernet/broadcom/sb1250-mac.c unsigned char *eaddr;
eaddr 2172 drivers/net/ethernet/broadcom/sb1250-mac.c eaddr = sc->sbm_hwaddr;
eaddr 2182 drivers/net/ethernet/broadcom/sb1250-mac.c eaddr[i] = (uint8_t) (ea_reg & 0xFF);
eaddr 2187 drivers/net/ethernet/broadcom/sb1250-mac.c dev->dev_addr[i] = eaddr[i];
eaddr 2258 drivers/net/ethernet/broadcom/sb1250-mac.c dev->name, base, eaddr);
eaddr 179 drivers/slimbus/core.c struct slim_eaddr *eaddr,
eaddr 189 drivers/slimbus/core.c sbdev->e_addr = *eaddr;
eaddr 352 drivers/slimbus/core.c struct slim_eaddr *eaddr)
eaddr 357 drivers/slimbus/core.c dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
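The amdgpu_vm.c entries above keep mapping ranges with inclusive ends (eaddr = saddr + size - 1), so unmapping a window out of an existing mapping can leave a piece before saddr and a piece after eaddr (after->start = eaddr + 1). A standalone model of that split (struct and names illustrative; the kernel also scales addresses to GPU pages first):

    #include <stdint.h>
    #include <stdio.h>

    struct mapping { uint64_t start, last; }; /* inclusive range */

    static void split(struct mapping m, uint64_t saddr, uint64_t eaddr)
    {
        if (m.start < saddr)
            printf("keep before: [0x%llx, 0x%llx]\n",
                   (unsigned long long)m.start,
                   (unsigned long long)(saddr - 1));
        if (m.last > eaddr)
            printf("keep after:  [0x%llx, 0x%llx]\n",
                   (unsigned long long)(eaddr + 1),
                   (unsigned long long)m.last);
    }

    int main(void)
    {
        uint64_t saddr = 0x2000, size = 0x1000;
        uint64_t eaddr = saddr + size - 1;    /* inclusive end */
        struct mapping m = { 0x1000, 0x4fff };

        split(m, saddr, eaddr); /* prints both remainder pieces */
        return 0;
    }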
eaddr 82 fs/freevxfs/vxfs_olt.c char *oaddr, *eaddr;
eaddr 105 fs/freevxfs/vxfs_olt.c eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);
eaddr 107 fs/freevxfs/vxfs_olt.c while (oaddr < eaddr) {
eaddr 377 kernel/kexec_core.c unsigned long pfn, epfn, addr, eaddr;
eaddr 385 kernel/kexec_core.c eaddr = epfn << PAGE_SHIFT;
eaddr 387 kernel/kexec_core.c kimage_is_destination_range(image, addr, eaddr)) {
eaddr 936 lib/debugobjects.c unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
eaddr 945 lib/debugobjects.c eaddr = saddr + size;
eaddr 947 lib/debugobjects.c chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
eaddr 959 lib/debugobjects.c if (oaddr < saddr || oaddr >= eaddr)
eaddr 607 tools/perf/util/probe-finder.c Dwarf_Addr eaddr;
eaddr 618 tools/perf/util/probe-finder.c if (dwarf_entrypc(sp_die, &eaddr) == 0) {
eaddr 624 tools/perf/util/probe-finder.c eaddr = sym.st_value;
eaddr 632 tools/perf/util/probe-finder.c tp->offset = (unsigned long)(paddr - eaddr);
eaddr 640 tools/perf/util/probe-finder.c if (eaddr != paddr) {
eaddr 897 virt/kvm/arm/vgic/vgic-its.c gpa_t *eaddr)
eaddr 930 virt/kvm/arm/vgic/vgic-its.c if (eaddr)
eaddr 931 virt/kvm/arm/vgic/vgic-its.c *eaddr = addr;
eaddr 961 virt/kvm/arm/vgic/vgic-its.c if (eaddr)
eaddr 962 virt/kvm/arm/vgic/vgic-its.c *eaddr = indirect_ptr;
eaddr 2357 virt/kvm/arm/vgic/vgic-its.c gpa_t eaddr;
eaddr 2360 virt/kvm/arm/vgic/vgic-its.c dev->device_id, &eaddr))
eaddr 2367 virt/kvm/arm/vgic/vgic-its.c ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
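The lib/debugobjects.c entries above compute an exclusive end (eaddr = saddr + size), align the start down to paddr, and round the span up to whole chunks; the oaddr check then tests membership in the original byte range. A standalone model (the final right-shift by ODEBUG_CHUNK_SHIFT is the assumed continuation of the truncated line 947 entry, and the constants are illustrative):

    #include <stdio.h>

    #define ODEBUG_CHUNK_SHIFT 4
    #define ODEBUG_CHUNK_SIZE  (1UL << ODEBUG_CHUNK_SHIFT)

    int main(void)
    {
        unsigned long saddr = 0x1003, size = 40;
        unsigned long eaddr = saddr + size;                     /* exclusive end */
        unsigned long paddr = saddr & ~(ODEBUG_CHUNK_SIZE - 1); /* aligned base */
        unsigned long chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1))
                               >> ODEBUG_CHUNK_SHIFT;
        unsigned long oaddr = 0x1010;

        printf("%lu chunks cover the range\n", chunks);
        if (oaddr < saddr || oaddr >= eaddr)
            printf("0x%lx is outside [saddr, eaddr)\n", oaddr);
        else
            printf("0x%lx is inside [saddr, eaddr)\n", oaddr);
        return 0;
    }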