ea 3857 arch/m68k/ifpsp060/src/fpsp.S # if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
ea 19789 arch/m68k/ifpsp060/src/fpsp.S # if the ea is -() or ()+, need to know # of bytes. #
ea 102 arch/m68k/math-emu/fp_decode.h | decode destination format for fmove reg,ea
ea 107 arch/m68k/math-emu/fp_decode.h | decode source register for fmove reg,ea
ea 91 arch/mips/alchemy/common/prom.c static inline void str2eaddr(unsigned char *ea, unsigned char *str)
ea 102 arch/mips/alchemy/common/prom.c ea[i] = num;
ea 91 arch/mips/pmcs-msp71xx/msp_prom.c int str2eaddr(unsigned char *ea, unsigned char *str)
ea 98 arch/mips/pmcs-msp71xx/msp_prom.c ea[index++] = num;
ea 108 arch/mips/pmcs-msp71xx/msp_prom.c ea[index++] = num;
ea 49 arch/mips/sgi-ip32/ip32-setup.c static inline void str2eaddr(unsigned char *ea, unsigned char *str)
ea 60 arch/mips/sgi-ip32/ip32-setup.c ea[i] = num;
ea 59 arch/nios2/include/asm/elf.h pr_reg[21] = regs->ea; \
ea 61 arch/nios2/include/asm/entry.h stw ea, PT_EA(sp)
ea 85 arch/nios2/include/asm/entry.h ldw ea, PT_EA(sp)
ea 78 arch/nios2/include/asm/processor.h #define KSTK_EIP(tsk) ((tsk)->thread.kregs->ea)
ea 44 arch/nios2/include/asm/ptrace.h unsigned long ea; /* Exception return address (pc) */
ea 44 arch/nios2/kernel/asm-offsets.c OFFSET(PT_EA, pt_regs, ea);
ea 52 arch/nios2/kernel/kgdb.c { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, ea) },
ea 101 arch/nios2/kernel/kgdb.c gdb_regs[GDB_PC] = p->thread.kregs->ea;
ea 106 arch/nios2/kernel/kgdb.c regs->ea = pc;
ea 122 arch/nios2/kernel/kgdb.c regs->ea = addr;
ea 137 arch/nios2/kernel/kgdb.c regs->ea -= 4;
ea 75 arch/nios2/kernel/misaligned.c fp->ea -= 4;
ea 83 arch/nios2/kernel/misaligned.c isn = *(unsigned long *)(fp->ea);
ea 156 arch/nios2/kernel/misaligned.c fp->ea, (unsigned int)addr,
ea 160 arch/nios2/kernel/misaligned.c fp->ea,
ea 164 arch/nios2/kernel/misaligned.c _exception(SIGSEGV, fp, SEGV_MAPERR, fp->ea);
ea 174 arch/nios2/kernel/misaligned.c fp->ea += 4;
ea 178 arch/nios2/kernel/misaligned.c fp->ea,
ea 195 arch/nios2/kernel/misaligned.c fp->ea, fp->ra, fp->sp);
ea 199 arch/nios2/kernel/misaligned.c _exception(SIGBUS, fp, BUS_ADRALN, fp->ea);
ea 201 arch/nios2/kernel/misaligned.c fp->ea += 4; /* else advance */
ea 96 arch/nios2/kernel/process.c regs->ea, regs->estatus);
ea 173 arch/nios2/kernel/process.c pr_emerg("PC: %08lx\n", fp->ea);
ea 192 arch/nios2/kernel/process.c tp = ((unsigned char *) fp->ea) - 0x20;
ea 251 arch/nios2/kernel/process.c regs->ea = pc;
ea 54 arch/nios2/kernel/ptrace.c REG_O_ONE(&regs->ea, PTR_EA);
ea 57 arch/nios2/kernel/ptrace.c REG_O_ONE(&regs->ea, PTR_PC); /* use ea for PC */
ea 100 arch/nios2/kernel/ptrace.c REG_IN_ONE(&regs->ea, PTR_EA);
ea 103 arch/nios2/kernel/ptrace.c REG_IN_ONE(&regs->ea, PTR_PC); /* use ea for PC */
ea 83 arch/nios2/kernel/signal.c err |= __get_user(regs->ea, &gregs[27]);
ea 160 arch/nios2/kernel/signal.c err |= __put_user(regs->ea, &gregs[27]);
ea 210 arch/nios2/kernel/signal.c regs->ea = (unsigned long) ksig->ka.sa.sa_handler;
ea 244 arch/nios2/kernel/signal.c continue_addr = regs->ea;
ea 261 arch/nios2/kernel/signal.c regs->ea = restart_addr;
ea 268 arch/nios2/kernel/signal.c if (unlikely(restart && regs->ea == restart_addr)) {
ea 275 arch/nios2/kernel/signal.c regs->ea = continue_addr;
ea 285 arch/nios2/kernel/signal.c if (unlikely(restart) && regs->ea == restart_addr) {
ea 286 arch/nios2/kernel/signal.c regs->ea = continue_addr;
ea 120 arch/nios2/kernel/traps.c fp->ea -= 4;
ea 121 arch/nios2/kernel/traps.c _exception(SIGTRAP, fp, TRAP_BRKPT, fp->ea);
ea 131 arch/nios2/kernel/traps.c fp->ea -= 4;
ea 141 arch/nios2/kernel/traps.c pr_alert(" op-code 0x%08lx\n", *(unsigned long *)(fp->ea));
ea 153 arch/nios2/kernel/traps.c fp->ea -= 4;
ea 154 arch/nios2/kernel/traps.c _exception(SIGILL, fp, ILL_ILLOPC, fp->ea);
ea 160 arch/nios2/kernel/traps.c fp->ea -= 4;
ea 161 arch/nios2/kernel/traps.c _exception(SIGILL, fp, ILL_PRVOPC, fp->ea);
ea 167 arch/nios2/kernel/traps.c fp->ea -= 4;
ea 168 arch/nios2/kernel/traps.c _exception(SIGFPE, fp, FPE_INTDIV, fp->ea);
ea 181 arch/nios2/kernel/traps.c regs->ea -= 4;
ea 184 arch/nios2/kernel/traps.c pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea));
ea 189 arch/nios2/kernel/traps.c _send_sig(SIGUSR1, 0, fp->ea);
ea 194 arch/nios2/kernel/traps.c _send_sig(SIGUSR2, 0, fp->ea);
ea 199 arch/nios2/kernel/traps.c _send_sig(SIGILL, ILL_ILLTRP, fp->ea);
ea 18 arch/nios2/mm/extable.c fixup = search_exception_tables(regs->ea);
ea 20 arch/nios2/mm/extable.c regs->ea = fixup->fixup;
ea 55 arch/nios2/mm/fault.c regs->ea -= 4;
ea 87 arch/nios2/mm/fault.c if (!user_mode(regs) && !search_exception_tables(regs->ea))
ea 211 arch/nios2/mm/fault.c pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
ea 130 arch/openrisc/kernel/traps.c unsigned long ea, unsigned long vector)
ea 135 arch/openrisc/kernel/traps.c printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);
ea 233 arch/openrisc/kernel/traps.c void unhandled_exception(struct pt_regs *regs, int ea, int vector)
ea 236 arch/openrisc/kernel/traps.c ea, vector);
ea 84 arch/powerpc/include/asm/asm-prototypes.h long do_slb_fault(struct pt_regs *regs, unsigned long ea);
ea 85 arch/powerpc/include/asm/asm-prototypes.h void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
ea 86 arch/powerpc/include/asm/book3s/64/hash.h #define NON_LINEAR_REGION_ID(ea) ((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)
ea 115 arch/powerpc/include/asm/book3s/64/hash.h static inline int get_region_id(unsigned long ea)
ea 118 arch/powerpc/include/asm/book3s/64/hash.h int id = (ea >> 60UL);
ea 126 arch/powerpc/include/asm/book3s/64/hash.h if (ea < H_KERN_VIRT_START)
ea 131 arch/powerpc/include/asm/book3s/64/hash.h region_id = NON_LINEAR_REGION_ID(ea);
ea 247 arch/powerpc/include/asm/book3s/64/hash.h int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
ea 146 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long ea,
ea 156 arch/powerpc/include/asm/book3s/64/mmu-hash.h int (*hpte_removebolted)(unsigned long ea,
ea 420 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long hpt_vpn(unsigned long ea,
ea 427 arch/powerpc/include/asm/book3s/64/mmu-hash.h return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
ea 456 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int __hash_page_4K(unsigned long ea, unsigned long access,
ea 459 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int __hash_page_64K(unsigned long ea, unsigned long access,
ea 464 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
ea 467 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
ea 469 arch/powerpc/include/asm/book3s/64/mmu-hash.h int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
ea 473 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern int __hash_page_thp(unsigned long ea, unsigned long access,
ea 477 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline int __hash_page_thp(unsigned long ea, unsigned long access,
ea 486 arch/powerpc/include/asm/book3s/64/mmu-hash.h extern void hash_failure_debug(unsigned long ea, unsigned long access,
ea 766 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
ea 776 arch/powerpc/include/asm/book3s/64/mmu-hash.h if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
ea 785 arch/powerpc/include/asm/book3s/64/mmu-hash.h ((ea >> SID_SHIFT) & ESID_BITS_MASK);
ea 791 arch/powerpc/include/asm/book3s/64/mmu-hash.h ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
ea 811 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long get_kernel_context(unsigned long ea)
ea 813 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned long region_id = get_region_id(ea);
ea 823 arch/powerpc/include/asm/book3s/64/mmu-hash.h ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
ea 832 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
ea 836 arch/powerpc/include/asm/book3s/64/mmu-hash.h if (!is_kernel_addr(ea))
ea 839 arch/powerpc/include/asm/book3s/64/mmu-hash.h context = get_kernel_context(ea);
ea 840 arch/powerpc/include/asm/book3s/64/mmu-hash.h return get_vsid(context, ea, ssize);
ea 246 arch/powerpc/include/asm/book3s/64/mmu.h static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
ea 248 arch/powerpc/include/asm/book3s/64/mmu.h int index = ea >> MAX_EA_BITS_PER_CONTEXT;
ea 259 arch/powerpc/include/asm/book3s/64/mmu.h unsigned long ea, int ssize)
ea 261 arch/powerpc/include/asm/book3s/64/mmu.h unsigned long context = get_user_context(ctx, ea);
ea 263 arch/powerpc/include/asm/book3s/64/mmu.h return get_vsid(context, ea, ssize);
ea 1037 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
ea 1044 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
ea 1046 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__map_kernel_page(ea, pa, prot);
ea 277 arch/powerpc/include/asm/book3s/64/radix.h extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
ea 16 arch/powerpc/include/asm/copro.h int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
ea 19 arch/powerpc/include/asm/copro.h int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
ea 725 arch/powerpc/include/asm/io.h int early_ioremap_range(unsigned long ea, phys_addr_t pa,
ea 733 arch/powerpc/include/asm/io.h extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
ea 735 arch/powerpc/include/asm/io.h extern void __iounmap_at(void *ea, unsigned long size);
ea 13 arch/powerpc/include/asm/isa-bridge.h unsigned long ea = (unsigned long)address;
ea 14 arch/powerpc/include/asm/isa-bridge.h return ea >= ISA_IO_BASE && ea < ISA_IO_END;
ea 145 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
ea 164 arch/powerpc/include/asm/kvm_book3s.h unsigned long gpa, gva_t ea, int is_store);
ea 179 arch/powerpc/include/asm/kvm_book3s.h unsigned long ea, unsigned long dsisr);
ea 1028 arch/powerpc/include/asm/kvm_ppc.h ulong ea;
ea 1031 arch/powerpc/include/asm/kvm_ppc.h ea = kvmppc_get_gpr(vcpu, rb);
ea 1033 arch/powerpc/include/asm/kvm_ppc.h ea += kvmppc_get_gpr(vcpu, ra);
ea 1042 arch/powerpc/include/asm/kvm_ppc.h ea = (uint32_t)ea;
ea 1044 arch/powerpc/include/asm/kvm_ppc.h return ea;
ea 78 arch/powerpc/include/asm/mmu_context.h unsigned long ea)
ea 82 arch/powerpc/include/asm/mmu_context.h int index = ea >> MAX_EA_BITS_PER_CONTEXT;
ea 93 arch/powerpc/include/asm/mmu_context.h static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
ea 97 arch/powerpc/include/asm/mmu_context.h context_id = get_user_context(&mm->context, ea);
ea 110 arch/powerpc/include/asm/mmu_context.h unsigned long ea)
ea 117 arch/powerpc/include/asm/mmu_context.h static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
ea 65 arch/powerpc/include/asm/nohash/64/pgtable.h #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
ea 358 arch/powerpc/include/asm/nohash/64/pgtable.h int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
ea 7 arch/powerpc/include/asm/pte-walk.h extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
ea 10 arch/powerpc/include/asm/pte-walk.h static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
ea 16 arch/powerpc/include/asm/pte-walk.h pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
ea 29 arch/powerpc/include/asm/pte-walk.h static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
ea 32 arch/powerpc/include/asm/pte-walk.h return __find_linux_pte(pgdir, ea, NULL, hshift);
ea 38 arch/powerpc/include/asm/pte-walk.h static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
ea 46 arch/powerpc/include/asm/pte-walk.h pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
ea 105 arch/powerpc/include/asm/sstep.h unsigned long ea;
ea 168 arch/powerpc/include/asm/sstep.h extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
ea 346 arch/powerpc/kernel/align.c r = emulate_dcbz(op.ea, regs);
ea 329 arch/powerpc/kernel/mce.c uint64_t ea = 0, pa = 0;
ea 443 arch/powerpc/kernel/mce.c ea = evt->u.ue_error.effective_address;
ea 454 arch/powerpc/kernel/mce.c ea = evt->u.slb_error.effective_address;
ea 463 arch/powerpc/kernel/mce.c ea = evt->u.erat_error.effective_address;
ea 472 arch/powerpc/kernel/mce.c ea = evt->u.tlb_error.effective_address;
ea 481 arch/powerpc/kernel/mce.c ea = evt->u.user_error.effective_address;
ea 490 arch/powerpc/kernel/mce.c ea = evt->u.ra_error.effective_address;
ea 499 arch/powerpc/kernel/mce.c ea = evt->u.link_error.effective_address;
ea 517 arch/powerpc/kernel/mce.c if (ea && evt->srr0 != ea) {
ea 519 arch/powerpc/kernel/mce.c n = sprintf(dar_str, "DAR: %016llx ", ea);
ea 378 arch/powerpc/kernel/mce_power.c pfn = addr_to_pfn(regs, op.ea);
ea 379 arch/powerpc/kernel/mce_power.c *addr = op.ea;
ea 881 arch/powerpc/kernel/traps.c unsigned long ea, msr, msr_mask;
ea 922 arch/powerpc/kernel/traps.c ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
ea 924 arch/powerpc/kernel/traps.c ea &= 0xfffffffful;
ea 925 arch/powerpc/kernel/traps.c addr = (__force const void __user *)ea;
ea 343 arch/powerpc/kvm/book3s_32_mmu.c static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
ea 350 arch/powerpc/kvm/book3s_32_mmu.c kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
ea 356 arch/powerpc/kvm/book3s_32_mmu.c ulong ea = esid << SID_SHIFT;
ea 362 arch/powerpc/kvm/book3s_32_mmu.c sr = find_sr(vcpu, ea);
ea 464 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
ea 469 arch/powerpc/kvm/book3s_64_mmu.c dprintk("KVM MMU: slbie(0x%llx)\n", ea);
ea 471 arch/powerpc/kvm/book3s_64_mmu.c slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
ea 476 arch/powerpc/kvm/book3s_64_mmu.c dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
ea 483 arch/powerpc/kvm/book3s_64_mmu.c kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
ea 590 arch/powerpc/kvm/book3s_64_mmu.c ulong ea = esid << SID_SHIFT;
ea 598 arch/powerpc/kvm/book3s_64_mmu.c slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
ea 356 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
ea 364 arch/powerpc/kvm/book3s_64_mmu_host.c (svcpu->slb[i].esid & seg_mask) == ea) {
ea 332 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long ea)
ea 337 arch/powerpc/kvm/book3s_64_mmu_hv.c return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
ea 429 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gpa, gva_t ea, int is_store)
ea 487 arch/powerpc/kvm/book3s_64_mmu_hv.c vcpu->arch.vaddr_accessed = ea;
ea 492 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long ea, unsigned long dsisr)
ea 513 arch/powerpc/kvm/book3s_64_mmu_hv.c return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
ea 521 arch/powerpc/kvm/book3s_64_mmu_hv.c if (ea != vcpu->arch.pgfault_addr)
ea 532 arch/powerpc/kvm/book3s_64_mmu_hv.c gpa = gpa_base | (ea & (psize - 1));
ea 533 arch/powerpc/kvm/book3s_64_mmu_hv.c return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
ea 561 arch/powerpc/kvm/book3s_64_mmu_hv.c gpa = gpa_base | (ea & (psize - 1));
ea 565 arch/powerpc/kvm/book3s_64_mmu_hv.c trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
ea 569 arch/powerpc/kvm/book3s_64_mmu_hv.c return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
ea 890 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long ea, unsigned long dsisr)
ea 907 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
ea 916 arch/powerpc/kvm/book3s_64_mmu_radix.c gpa |= ea & 0xfff;
ea 929 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
ea 932 arch/powerpc/kvm/book3s_64_mmu_radix.c return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
ea 938 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
ea 1271 arch/powerpc/kvm/book3s_hv_nested.c unsigned long ea = vcpu->arch.fault_dar;
ea 1289 arch/powerpc/kvm/book3s_hv_nested.c n_gpa |= ea & 0xFFF;
ea 1339 arch/powerpc/kvm/book3s_hv_nested.c kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
ea 1344 arch/powerpc/kvm/book3s_hv_nested.c return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
ea 1349 arch/powerpc/kvm/book3s_hv_nested.c kvmppc_core_queue_data_storage(vcpu, ea,
ea 129 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
ea 130 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
ea 131 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
ea 135 arch/powerpc/kvm/e500_emulate.c gva_t ea;
ea 164 arch/powerpc/kvm/e500_emulate.c ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
ea 165 arch/powerpc/kvm/e500_emulate.c emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
ea 170 arch/powerpc/kvm/e500_emulate.c ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
ea 171 arch/powerpc/kvm/e500_emulate.c emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
ea 176 arch/powerpc/kvm/e500_emulate.c ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
ea 177 arch/powerpc/kvm/e500_emulate.c emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
ea 245 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
ea 251 arch/powerpc/kvm/e500_mmu.c ia = (ea >> 2) & 0x1;
ea 254 arch/powerpc/kvm/e500_mmu.c tlbsel = (ea >> 3) & 0x1;
ea 262 arch/powerpc/kvm/e500_mmu.c ea &= 0xfffff000;
ea 263 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
ea 293 arch/powerpc/kvm/e500_mmu.c gva_t ea)
ea 298 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
ea 307 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
ea 316 arch/powerpc/kvm/e500_mmu.c tlbilx_one(vcpu_e500, pid, ea);
ea 341 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
ea 350 arch/powerpc/kvm/e500_mmu.c esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
ea 114 arch/powerpc/kvm/emulate_loadstore.c kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
ea 134 arch/powerpc/kvm/emulate_loadstore.c kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
ea 234 arch/powerpc/kvm/emulate_loadstore.c kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
ea 257 arch/powerpc/kvm/emulate_loadstore.c kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
ea 274 arch/powerpc/kvm/trace_hv.h struct kvm_memory_slot *memslot, unsigned long ea,
ea 277 arch/powerpc/kvm/trace_hv.h TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
ea 284 arch/powerpc/kvm/trace_hv.h __field(unsigned long, ea)
ea 295 arch/powerpc/kvm/trace_hv.h __entry->ea = ea;
ea 304 arch/powerpc/kvm/trace_hv.h __entry->ea, __entry->dsisr,
ea 51 arch/powerpc/lib/sstep.c extern int do_lq(unsigned long ea, unsigned long *regs);
ea 52 arch/powerpc/lib/sstep.c extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
ea 53 arch/powerpc/lib/sstep.c extern int do_lqarx(unsigned long ea, unsigned long *regs);
ea 54 arch/powerpc/lib/sstep.c extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
ea 105 arch/powerpc/lib/sstep.c unsigned long ea, int nb)
ea 109 arch/powerpc/lib/sstep.c if (__access_ok(ea, nb, USER_DS))
ea 111 arch/powerpc/lib/sstep.c if (__access_ok(ea, 1, USER_DS))
ea 115 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 126 arch/powerpc/lib/sstep.c unsigned long ea;
ea 129 arch/powerpc/lib/sstep.c ea = (signed short) instr; /* sign-extend */
ea 131 arch/powerpc/lib/sstep.c ea += regs->gpr[ra];
ea 133 arch/powerpc/lib/sstep.c return ea;
ea 144 arch/powerpc/lib/sstep.c unsigned long ea;
ea 147 arch/powerpc/lib/sstep.c ea = (signed short) (instr & ~3); /* sign-extend */
ea 149 arch/powerpc/lib/sstep.c ea += regs->gpr[ra];
ea 151 arch/powerpc/lib/sstep.c return ea;
ea 161 arch/powerpc/lib/sstep.c unsigned long ea;
ea 164 arch/powerpc/lib/sstep.c ea = (signed short) (instr & ~0xf); /* sign-extend */
ea 166 arch/powerpc/lib/sstep.c ea += regs->gpr[ra];
ea 168 arch/powerpc/lib/sstep.c return ea;
ea 179 arch/powerpc/lib/sstep.c unsigned long ea;
ea 183 arch/powerpc/lib/sstep.c ea = regs->gpr[rb];
ea 185 arch/powerpc/lib/sstep.c ea += regs->gpr[ra];
ea 187 arch/powerpc/lib/sstep.c return ea;
ea 246 arch/powerpc/lib/sstep.c unsigned long ea, int nb,
ea 254 arch/powerpc/lib/sstep.c err = __get_user(x, (unsigned char __user *) ea);
ea 257 arch/powerpc/lib/sstep.c err = __get_user(x, (unsigned short __user *) ea);
ea 260 arch/powerpc/lib/sstep.c err = __get_user(x, (unsigned int __user *) ea);
ea 264 arch/powerpc/lib/sstep.c err = __get_user(x, (unsigned long __user *) ea);
ea 271 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 279 arch/powerpc/lib/sstep.c static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
ea 286 arch/powerpc/lib/sstep.c c = max_align(ea);
ea 291 arch/powerpc/lib/sstep.c err = __get_user(*dest, (unsigned char __user *) ea);
ea 295 arch/powerpc/lib/sstep.c (unsigned short __user *) ea);
ea 299 arch/powerpc/lib/sstep.c (unsigned int __user *) ea);
ea 304 arch/powerpc/lib/sstep.c (unsigned long __user *) ea);
ea 309 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 313 arch/powerpc/lib/sstep.c ea += c;
ea 319 arch/powerpc/lib/sstep.c unsigned long ea, int nb,
ea 331 arch/powerpc/lib/sstep.c err = copy_mem_in(&u.b[i], ea, nb, regs);
ea 342 arch/powerpc/lib/sstep.c static int read_mem(unsigned long *dest, unsigned long ea, int nb,
ea 345 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, nb))
ea 347 arch/powerpc/lib/sstep.c if ((ea & (nb - 1)) == 0)
ea 348 arch/powerpc/lib/sstep.c return read_mem_aligned(dest, ea, nb, regs);
ea 349 arch/powerpc/lib/sstep.c return read_mem_unaligned(dest, ea, nb, regs);
ea 354 arch/powerpc/lib/sstep.c unsigned long ea, int nb,
ea 361 arch/powerpc/lib/sstep.c err = __put_user(val, (unsigned char __user *) ea);
ea 364 arch/powerpc/lib/sstep.c err = __put_user(val, (unsigned short __user *) ea);
ea 367 arch/powerpc/lib/sstep.c err = __put_user(val, (unsigned int __user *) ea);
ea 371 arch/powerpc/lib/sstep.c err = __put_user(val, (unsigned long __user *) ea);
ea 376 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 384 arch/powerpc/lib/sstep.c static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
ea 391 arch/powerpc/lib/sstep.c c = max_align(ea);
ea 396 arch/powerpc/lib/sstep.c err = __put_user(*dest, (unsigned char __user *) ea);
ea 400 arch/powerpc/lib/sstep.c (unsigned short __user *) ea);
ea 404 arch/powerpc/lib/sstep.c (unsigned int __user *) ea);
ea 409 arch/powerpc/lib/sstep.c (unsigned long __user *) ea);
ea 414 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 418 arch/powerpc/lib/sstep.c ea += c;
ea 424 arch/powerpc/lib/sstep.c unsigned long ea, int nb,
ea 435 arch/powerpc/lib/sstep.c return copy_mem_out(&u.b[i], ea, nb, regs);
ea 442 arch/powerpc/lib/sstep.c static int write_mem(unsigned long val, unsigned long ea, int nb,
ea 445 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, nb))
ea 447 arch/powerpc/lib/sstep.c if ((ea & (nb - 1)) == 0)
ea 448 arch/powerpc/lib/sstep.c return write_mem_aligned(val, ea, nb, regs);
ea 449 arch/powerpc/lib/sstep.c return write_mem_unaligned(val, ea, nb, regs);
ea 458 arch/powerpc/lib/sstep.c static int do_fp_load(struct instruction_op *op, unsigned long ea,
ea 472 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, nb))
ea 475 arch/powerpc/lib/sstep.c err = copy_mem_in(u.b, ea, nb, regs);
ea 509 arch/powerpc/lib/sstep.c static int do_fp_store(struct instruction_op *op, unsigned long ea,
ea 522 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, nb))
ea 549 arch/powerpc/lib/sstep.c return copy_mem_out(u.b, ea, nb, regs);
ea 556 arch/powerpc/lib/sstep.c static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
ea 566 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea & ~0xfUL, 16))
ea 569 arch/powerpc/lib/sstep.c ea &= ~(size - 1);
ea 570 arch/powerpc/lib/sstep.c err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
ea 574 arch/powerpc/lib/sstep.c do_byte_reverse(&u.b[ea & 0xf], size);
ea 584 arch/powerpc/lib/sstep.c static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
ea 593 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea & ~0xfUL, 16))
ea 596 arch/powerpc/lib/sstep.c ea &= ~(size - 1);
ea 605 arch/powerpc/lib/sstep.c do_byte_reverse(&u.b[ea & 0xf], size);
ea 606 arch/powerpc/lib/sstep.c return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
ea 611 arch/powerpc/lib/sstep.c static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
ea 616 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, 16))
ea 619 arch/powerpc/lib/sstep.c if ((ea & 0xf) == 0) {
ea 620 arch/powerpc/lib/sstep.c err = do_lq(ea, &regs->gpr[reg]);
ea 622 arch/powerpc/lib/sstep.c err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
ea 624 arch/powerpc/lib/sstep.c err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
ea 631 arch/powerpc/lib/sstep.c static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
ea 637 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, 16))
ea 645 arch/powerpc/lib/sstep.c if ((ea & 0xf) == 0)
ea 646 arch/powerpc/lib/sstep.c return do_stq(ea, vals[0], vals[1]);
ea 648 arch/powerpc/lib/sstep.c err = write_mem(vals[IS_LE], ea, 8, regs);
ea 650 arch/powerpc/lib/sstep.c err = write_mem(vals[IS_BE], ea + 8, 8, regs);
ea 817 arch/powerpc/lib/sstep.c unsigned long ea, struct pt_regs *regs,
ea 825 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
ea 849 arch/powerpc/lib/sstep.c unsigned long ea, struct pt_regs *regs,
ea 857 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 877 arch/powerpc/lib/sstep.c return copy_mem_out(mem, ea, size, regs);
ea 881 arch/powerpc/lib/sstep.c int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
ea 889 arch/powerpc/lib/sstep.c ea &= 0xffffffffUL;
ea 893 arch/powerpc/lib/sstep.c ea &= ~(size - 1);
ea 894 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 897 arch/powerpc/lib/sstep.c err = __put_user(0, (unsigned long __user *) (ea + i));
ea 899 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 1982 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 1987 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 1992 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 1998 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 2004 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 2009 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 2028 arch/powerpc/lib/sstep.c op->ea = xform_ea(instr, regs);
ea 2193 arch/powerpc/lib/sstep.c op->ea = ra ? regs->gpr[ra] : 0;
ea 2260 arch/powerpc/lib/sstep.c op->ea = ra ? regs->gpr[ra] : 0;
ea 2302 arch/powerpc/lib/sstep.c op->ea = ra ? regs->gpr[ra] : 0;
ea 2337 arch/powerpc/lib/sstep.c op->ea = ra ? regs->gpr[ra] : 0;
ea 2460 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2466 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2472 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2478 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2484 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2490 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2496 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2503 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2508 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2515 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2521 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2527 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2533 arch/powerpc/lib/sstep.c op->ea = dform_ea(instr, regs);
ea 2541 arch/powerpc/lib/sstep.c op->ea = dqform_ea(instr, regs);
ea 2547 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2572 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2592 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2597 arch/powerpc/lib/sstep.c op->ea = dqform_ea(instr, regs);
ea 2607 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2616 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2624 arch/powerpc/lib/sstep.c op->ea = dqform_ea(instr, regs);
ea 2637 arch/powerpc/lib/sstep.c op->ea = dsform_ea(instr, regs);
ea 2702 arch/powerpc/lib/sstep.c static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
ea 2708 arch/powerpc/lib/sstep.c if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
ea 2853 arch/powerpc/lib/sstep.c unsigned long ea;
ea 2860 arch/powerpc/lib/sstep.c ea = truncate_if_32bit(regs->msr, op->ea);
ea 2864 arch/powerpc/lib/sstep.c if (ea & (size - 1))
ea 2866 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 2873 arch/powerpc/lib/sstep.c __get_user_asmx(val, ea, err, "lbarx");
ea 2876 arch/powerpc/lib/sstep.c __get_user_asmx(val, ea, err, "lharx");
ea 2880 arch/powerpc/lib/sstep.c __get_user_asmx(val, ea, err, "lwarx");
ea 2884 arch/powerpc/lib/sstep.c __get_user_asmx(val, ea, err, "ldarx");
ea 2887 arch/powerpc/lib/sstep.c err = do_lqarx(ea, &regs->gpr[op->reg]);
ea 2894 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 2902 arch/powerpc/lib/sstep.c if (ea & (size - 1))
ea 2904 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 2910 arch/powerpc/lib/sstep.c __put_user_asmx(op->val, ea, err, "stbcx.", cr);
ea 2913 arch/powerpc/lib/sstep.c __put_user_asmx(op->val, ea, err, "stbcx.", cr);
ea 2917 arch/powerpc/lib/sstep.c __put_user_asmx(op->val, ea, err, "stwcx.", cr);
ea 2921 arch/powerpc/lib/sstep.c __put_user_asmx(op->val, ea, err, "stdcx.", cr);
ea 2924 arch/powerpc/lib/sstep.c err = do_stqcx(ea, regs->gpr[op->reg],
ea 2936 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 2942 arch/powerpc/lib/sstep.c err = emulate_lq(regs, ea, op->reg, cross_endian);
ea 2946 arch/powerpc/lib/sstep.c err = read_mem(&regs->gpr[op->reg], ea, size, regs);
ea 2965 arch/powerpc/lib/sstep.c err = do_fp_load(op, ea, regs, cross_endian);
ea 2972 arch/powerpc/lib/sstep.c err = do_vec_load(op->reg, ea, size, regs, cross_endian);
ea 2987 arch/powerpc/lib/sstep.c err = do_vsx_load(op, ea, regs, cross_endian);
ea 2992 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 3001 arch/powerpc/lib/sstep.c err = copy_mem_in((u8 *) &v32, ea, nb, regs);
ea 3007 arch/powerpc/lib/sstep.c ea += 4;
ea 3016 arch/powerpc/lib/sstep.c err = emulate_stq(regs, ea, op->reg, cross_endian);
ea 3023 arch/powerpc/lib/sstep.c ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
ea 3024 arch/powerpc/lib/sstep.c err = handle_stack_update(ea, regs);
ea 3029 arch/powerpc/lib/sstep.c err = write_mem(op->val, ea, size, regs);
ea 3036 arch/powerpc/lib/sstep.c err = do_fp_store(op, ea, regs, cross_endian);
ea 3043 arch/powerpc/lib/sstep.c err = do_vec_store(op->reg, ea, size, regs, cross_endian);
ea 3058 arch/powerpc/lib/sstep.c err = do_vsx_store(op, ea, regs, cross_endian);
ea 3063 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, size))
ea 3074 arch/powerpc/lib/sstep.c err = copy_mem_out((u8 *) &v32, ea, nb, regs);
ea 3077 arch/powerpc/lib/sstep.c ea += 4;
ea 3091 arch/powerpc/lib/sstep.c regs->gpr[op->update_reg] = op->ea;
ea 3109 arch/powerpc/lib/sstep.c unsigned long ea;
ea 3131 arch/powerpc/lib/sstep.c ea = truncate_if_32bit(regs->msr, op.ea);
ea 3132 arch/powerpc/lib/sstep.c if (!address_ok(regs, ea, 8))
ea 3136 arch/powerpc/lib/sstep.c __cacheop_user_asmx(ea, err, "dcbst");
ea 3139 arch/powerpc/lib/sstep.c __cacheop_user_asmx(ea, err, "dcbf");
ea 3143 arch/powerpc/lib/sstep.c prefetchw((void *) ea);
ea 3147 arch/powerpc/lib/sstep.c prefetch((void *) ea);
ea 3150 arch/powerpc/lib/sstep.c __cacheop_user_asmx(ea, err, "icbi");
ea 3153 arch/powerpc/lib/sstep.c err = emulate_dcbz(ea, regs);
ea 3157 arch/powerpc/lib/sstep.c regs->dar = ea;
ea 10 arch/powerpc/math-emu/lfd.c lfd(void *frD, void *ea)
ea 12 arch/powerpc/math-emu/lfd.c if (copy_from_user(frD, ea, sizeof(double)))
ea 15 arch/powerpc/math-emu/lfd.c printk("%s: D %p, ea %p: ", __func__, frD, ea);
ea 12 arch/powerpc/math-emu/lfs.c lfs(void *frD, void *ea)
ea 20 arch/powerpc/math-emu/lfs.c printk("%s: D %p, ea %p\n", __func__, frD, ea);
ea 23 arch/powerpc/math-emu/lfs.c if (copy_from_user(&f, ea, sizeof(float)))
ea 7 arch/powerpc/math-emu/stfd.c stfd(void *frS, void *ea)
ea 11 arch/powerpc/math-emu/stfd.c printk("%s: S %p, ea %p: ", __func__, frS, ea);
ea 17 arch/powerpc/math-emu/stfd.c if (copy_to_user(ea, frS, sizeof(double)))
ea 7 arch/powerpc/math-emu/stfiwx.c stfiwx(u32 *frS, void *ea)
ea 10 arch/powerpc/math-emu/stfiwx.c printk("%s: %p %p\n", __func__, frS, ea);
ea 13 arch/powerpc/math-emu/stfiwx.c if (copy_to_user(ea, &frS[1], sizeof(frS[1])))
ea 12 arch/powerpc/math-emu/stfs.c stfs(void *frS, void *ea)
ea 20 arch/powerpc/math-emu/stfs.c printk("%s: S %p, ea %p\n", __func__, frS, ea);
ea 38 arch/powerpc/math-emu/stfs.c if (copy_to_user(ea, &f, sizeof(float)))
ea 300 arch/powerpc/mm/book3s32/mmu.c void hash_preload(struct mm_struct *mm, unsigned long ea)
ea 306 arch/powerpc/mm/book3s32/mmu.c pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
ea 308 arch/powerpc/mm/book3s32/mmu.c add_hash_page(mm->context.id, ea, pmd_val(*pmd));
ea 19 arch/powerpc/mm/book3s64/hash_4k.c int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
ea 64 arch/powerpc/mm/book3s64/hash_4k.c vpn = hpt_vpn(ea, vsid, ssize);
ea 115 arch/powerpc/mm/book3s64/hash_4k.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 36 arch/powerpc/mm/book3s64/hash_64k.c int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
ea 87 arch/powerpc/mm/book3s64/hash_64k.c subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
ea 88 arch/powerpc/mm/book3s64/hash_64k.c vpn = hpt_vpn(ea, vsid, ssize);
ea 211 arch/powerpc/mm/book3s64/hash_64k.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 223 arch/powerpc/mm/book3s64/hash_64k.c int __hash_page_64K(unsigned long ea, unsigned long access,
ea 270 arch/powerpc/mm/book3s64/hash_64k.c vpn = hpt_vpn(ea, vsid, ssize);
ea 323 arch/powerpc/mm/book3s64/hash_64k.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 21 arch/powerpc/mm/book3s64/hash_hugepage.c int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
ea 76 arch/powerpc/mm/book3s64/hash_hugepage.c index = (ea & ~HPAGE_PMD_MASK) >> shift;
ea 79 arch/powerpc/mm/book3s64/hash_hugepage.c vpn = hpt_vpn(ea, vsid, ssize);
ea 88 arch/powerpc/mm/book3s64/hash_hugepage.c flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
ea 166 arch/powerpc/mm/book3s64/hash_hugepage.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 25 arch/powerpc/mm/book3s64/hash_hugetlbpage.c int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
ea 38 arch/powerpc/mm/book3s64/hash_hugetlbpage.c vpn = hpt_vpn(ea, vsid, ssize);
ea 117 arch/powerpc/mm/book3s64/hash_hugetlbpage.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 518 arch/powerpc/mm/book3s64/hash_native.c static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
ea 526 arch/powerpc/mm/book3s64/hash_native.c vsid = get_kernel_vsid(ea, ssize);
ea 527 arch/powerpc/mm/book3s64/hash_native.c vpn = hpt_vpn(ea, vsid, ssize);
ea 550 arch/powerpc/mm/book3s64/hash_native.c static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
ea 557 arch/powerpc/mm/book3s64/hash_native.c vsid = get_kernel_vsid(ea, ssize);
ea 558 arch/powerpc/mm/book3s64/hash_native.c vpn = hpt_vpn(ea, vsid, ssize);
ea 148 arch/powerpc/mm/book3s64/hash_pgtable.c int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
ea 157 arch/powerpc/mm/book3s64/hash_pgtable.c pgdp = pgd_offset_k(ea);
ea 158 arch/powerpc/mm/book3s64/hash_pgtable.c pudp = pud_alloc(&init_mm, pgdp, ea);
ea 161 arch/powerpc/mm/book3s64/hash_pgtable.c pmdp = pmd_alloc(&init_mm, pudp, ea);
ea 164 arch/powerpc/mm/book3s64/hash_pgtable.c ptep = pte_alloc_kernel(pmdp, ea);
ea 167 arch/powerpc/mm/book3s64/hash_pgtable.c set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
ea 175 arch/powerpc/mm/book3s64/hash_pgtable.c if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
ea 1164 arch/powerpc/mm/book3s64/hash_utils.c static int subpage_protection(struct mm_struct *mm, unsigned long ea)
ea 1173 arch/powerpc/mm/book3s64/hash_utils.c if (ea >= spt->maxaddr)
ea 1175 arch/powerpc/mm/book3s64/hash_utils.c if (ea < 0x100000000UL) {
ea 1179 arch/powerpc/mm/book3s64/hash_utils.c sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
ea 1183 arch/powerpc/mm/book3s64/hash_utils.c sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
ea 1186 arch/powerpc/mm/book3s64/hash_utils.c spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
ea 1189 arch/powerpc/mm/book3s64/hash_utils.c spp >>= 30 - 2 * ((ea >> 12) & 0xf);
ea 1202 arch/powerpc/mm/book3s64/hash_utils.c static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
ea 1208 arch/powerpc/mm/book3s64/hash_utils.c void hash_failure_debug(unsigned long ea, unsigned long access,
ea 1215 arch/powerpc/mm/book3s64/hash_utils.c ea, access, current->comm);
ea 1220 arch/powerpc/mm/book3s64/hash_utils.c static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
ea 1224 arch/powerpc/mm/book3s64/hash_utils.c if (psize != get_paca_psize(ea)) {
ea 1243 arch/powerpc/mm/book3s64/hash_utils.c int hash_page_mm(struct mm_struct *mm, unsigned long ea,
ea 1257 arch/powerpc/mm/book3s64/hash_utils.c ea, access, trap);
ea 1258 arch/powerpc/mm/book3s64/hash_utils.c trace_hash_fault(ea, access, trap);
ea 1261 arch/powerpc/mm/book3s64/hash_utils.c switch (get_region_id(ea)) {
ea 1269 arch/powerpc/mm/book3s64/hash_utils.c psize = get_slice_psize(mm, ea);
ea 1270 arch/powerpc/mm/book3s64/hash_utils.c ssize = user_segment_size(ea);
ea 1271 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_user_vsid(&mm->context, ea, ssize);
ea 1274 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
ea 1280 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
ea 1321 arch/powerpc/mm/book3s64/hash_utils.c ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
ea 1325 arch/powerpc/mm/book3s64/hash_utils.c ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
ea 1347 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
ea 1351 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_huge(ea, access, vsid, ptep, trap,
ea 1364 arch/powerpc/mm/book3s64/hash_utils.c check_paca_psize(ea, mm, psize, user_region);
ea 1379 arch/powerpc/mm/book3s64/hash_utils.c demote_segment_4k(mm, ea);
ea 1389 arch/powerpc/mm/book3s64/hash_utils.c demote_segment_4k(mm, ea);
ea 1391 arch/powerpc/mm/book3s64/hash_utils.c } else if (ea < VMALLOC_END) {
ea 1408 arch/powerpc/mm/book3s64/hash_utils.c check_paca_psize(ea, mm, psize, user_region);
ea 1412 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_64K(ea, access, vsid, ptep, trap,
ea 1417 arch/powerpc/mm/book3s64/hash_utils.c int spp = subpage_protection(mm, ea);
ea 1421 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_4K(ea, access, vsid, ptep, trap,
ea 1430 arch/powerpc/mm/book3s64/hash_utils.c hash_failure_debug(ea, access, vsid, trap, ssize, psize,
ea 1446 arch/powerpc/mm/book3s64/hash_utils.c int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
ea 1452 arch/powerpc/mm/book3s64/hash_utils.c if ((get_region_id(ea) == VMALLOC_REGION_ID) ||
ea 1453 arch/powerpc/mm/book3s64/hash_utils.c (get_region_id(ea) == IO_REGION_ID))
ea 1459 arch/powerpc/mm/book3s64/hash_utils.c return hash_page_mm(mm, ea, access, trap, flags);
ea 1463 arch/powerpc/mm/book3s64/hash_utils.c int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
ea 1469 arch/powerpc/mm/book3s64/hash_utils.c unsigned int region_id = get_region_id(ea);
ea 1494 arch/powerpc/mm/book3s64/hash_utils.c return hash_page_mm(mm, ea, access, trap, flags);
ea 1498 arch/powerpc/mm/book3s64/hash_utils.c static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
ea 1500 arch/powerpc/mm/book3s64/hash_utils.c int psize = get_slice_psize(mm, ea);
ea 1509 arch/powerpc/mm/book3s64/hash_utils.c if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
ea 1515 arch/powerpc/mm/book3s64/hash_utils.c static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
ea 1521 arch/powerpc/mm/book3s64/hash_utils.c static void hash_preload(struct mm_struct *mm, unsigned long ea,
ea 1532 arch/powerpc/mm/book3s64/hash_utils.c BUG_ON(get_region_id(ea) != USER_REGION_ID);
ea 1534 arch/powerpc/mm/book3s64/hash_utils.c if (!should_hash_preload(mm, ea))
ea 1538 arch/powerpc/mm/book3s64/hash_utils.c " trap=%lx\n", mm, mm->pgd, ea, access, trap);
ea 1546 arch/powerpc/mm/book3s64/hash_utils.c ssize = user_segment_size(ea);
ea 1547 arch/powerpc/mm/book3s64/hash_utils.c vsid = get_user_vsid(&mm->context, ea, ssize);
ea 1560 arch/powerpc/mm/book3s64/hash_utils.c ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
ea 1583 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_64K(ea, access, vsid, ptep, trap,
ea 1587 arch/powerpc/mm/book3s64/hash_utils.c rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
ea 1588 arch/powerpc/mm/book3s64/hash_utils.c ssize, subpage_protection(mm, ea));
ea 1594 arch/powerpc/mm/book3s64/hash_utils.c hash_failure_debug(ea, access, vsid, trap, ssize,
ea 58 arch/powerpc/mm/book3s64/radix_pgtable.c static int early_map_kernel_page(unsigned long ea, unsigned long pa,
ea 70 arch/powerpc/mm/book3s64/radix_pgtable.c pgdp = pgd_offset_k(ea);
ea 76 arch/powerpc/mm/book3s64/radix_pgtable.c pudp = pud_offset(pgdp, ea);
ea 86 arch/powerpc/mm/book3s64/radix_pgtable.c pmdp = pmd_offset(pudp, ea);
ea 96 arch/powerpc/mm/book3s64/radix_pgtable.c ptep = pte_offset_kernel(pmdp, ea);
ea 99 arch/powerpc/mm/book3s64/radix_pgtable.c set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
ea 108 arch/powerpc/mm/book3s64/radix_pgtable.c static int __map_kernel_page(unsigned long ea, unsigned long pa,
ea 129 arch/powerpc/mm/book3s64/radix_pgtable.c return early_map_kernel_page(ea, pa, flags, map_page_size,
ea 137 arch/powerpc/mm/book3s64/radix_pgtable.c pgdp = pgd_offset_k(ea);
ea 138 arch/powerpc/mm/book3s64/radix_pgtable.c pudp = pud_alloc(&init_mm, pgdp, ea);
ea 145 arch/powerpc/mm/book3s64/radix_pgtable.c pmdp = pmd_alloc(&init_mm, pudp, ea);
ea 152 arch/powerpc/mm/book3s64/radix_pgtable.c ptep = pte_alloc_kernel(pmdp, ea);
ea 157 arch/powerpc/mm/book3s64/radix_pgtable.c set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
ea 162 arch/powerpc/mm/book3s64/radix_pgtable.c int radix__map_kernel_page(unsigned long ea, unsigned long pa,
ea 166 arch/powerpc/mm/book3s64/radix_pgtable.c return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
ea 886 arch/powerpc/mm/book3s64/radix_pgtable.c static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
ea 890 arch/powerpc/mm/book3s64/radix_pgtable.c return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
ea 33 arch/powerpc/mm/book3s64/slb.c static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
ea 38 arch/powerpc/mm/book3s64/slb.c static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
ea 41 arch/powerpc/mm/book3s64/slb.c return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
ea 51 arch/powerpc/mm/book3s64/slb.c static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
ea 54 arch/powerpc/mm/book3s64/slb.c return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
ea 57 arch/powerpc/mm/book3s64/slb.c static void assert_slb_presence(bool present, unsigned long ea)
ea 71 arch/powerpc/mm/book3s64/slb.c ea &= ~((1UL << 28) - 1);
ea 72 arch/powerpc/mm/book3s64/slb.c asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
ea 78 arch/powerpc/mm/book3s64/slb.c static inline void slb_shadow_update(unsigned long ea, int ssize,
ea 90 arch/powerpc/mm/book3s64/slb.c WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
ea 91 arch/powerpc/mm/book3s64/slb.c WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
ea 99 arch/powerpc/mm/book3s64/slb.c static inline void create_shadowed_slbe(unsigned long ea, int ssize,
ea 108 arch/powerpc/mm/book3s64/slb.c slb_shadow_update(ea, ssize, flags, index);
ea 110 arch/powerpc/mm/book3s64/slb.c assert_slb_presence(false, ea);
ea 112 arch/powerpc/mm/book3s64/slb.c : "r" (mk_vsid_data(ea, ssize, flags)),
ea 113 arch/powerpc/mm/book3s64/slb.c "r" (mk_esid_data(ea, ssize, index))
ea 281 arch/powerpc/mm/book3s64/slb.c static bool preload_add(struct thread_info *ti, unsigned long ea)
ea 288 arch/powerpc/mm/book3s64/slb.c if (ea & ESID_MASK_1T)
ea 289 arch/powerpc/mm/book3s64/slb.c ea &= ESID_MASK_1T;
ea 292 arch/powerpc/mm/book3s64/slb.c esid = ea >> SID_SHIFT;
ea 434 arch/powerpc/mm/book3s64/slb.c unsigned long ea;
ea 436 arch/powerpc/mm/book3s64/slb.c ea = (unsigned long)
ea 444 arch/powerpc/mm/book3s64/slb.c slbie_data = ea;
ea 493 arch/powerpc/mm/book3s64/slb.c unsigned long ea;
ea 496 arch/powerpc/mm/book3s64/slb.c ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;
ea 498 arch/powerpc/mm/book3s64/slb.c slb_allocate_user(mm, ea);
ea 640 arch/powerpc/mm/book3s64/slb.c static long slb_insert_entry(unsigned long ea, unsigned long context,
ea 647 arch/powerpc/mm/book3s64/slb.c vsid = get_vsid(context, ea, ssize);
ea 665 arch/powerpc/mm/book3s64/slb.c esid_data = mk_esid_data(ea, ssize, index);
ea 673 arch/powerpc/mm/book3s64/slb.c assert_slb_presence(false, ea);
ea 684 arch/powerpc/mm/book3s64/slb.c static long slb_allocate_kernel(unsigned long ea, unsigned long id)
ea 693 arch/powerpc/mm/book3s64/slb.c if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
ea 701 arch/powerpc/mm/book3s64/slb.c if (ea >= H_VMEMMAP_END)
ea 708 arch/powerpc/mm/book3s64/slb.c if (ea >= H_VMALLOC_END)
ea 715 arch/powerpc/mm/book3s64/slb.c if (ea >= H_KERN_IO_END)
ea 728 arch/powerpc/mm/book3s64/slb.c context = get_kernel_context(ea);
ea 730 arch/powerpc/mm/book3s64/slb.c return slb_insert_entry(ea, context, flags, ssize, true);
ea 733 arch/powerpc/mm/book3s64/slb.c static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
ea 744 arch/powerpc/mm/book3s64/slb.c if (ea >= mm_ctx_slb_addr_limit(&mm->context))
ea 747 arch/powerpc/mm/book3s64/slb.c context = get_user_context(&mm->context, ea);
ea 751 arch/powerpc/mm/book3s64/slb.c if (unlikely(ea >= H_PGTABLE_RANGE)) {
ea 756 arch/powerpc/mm/book3s64/slb.c ssize = user_segment_size(ea);
ea 758 arch/powerpc/mm/book3s64/slb.c bpsize = get_slice_psize(mm, ea);
ea 761 arch/powerpc/mm/book3s64/slb.c return slb_insert_entry(ea, context, flags, ssize, false);
ea 764 arch/powerpc/mm/book3s64/slb.c long do_slb_fault(struct pt_regs *regs, unsigned long ea)
ea 766 arch/powerpc/mm/book3s64/slb.c unsigned long id = get_region_id(ea);
ea 796 arch/powerpc/mm/book3s64/slb.c err = slb_allocate_kernel(ea, id);
ea 808 arch/powerpc/mm/book3s64/slb.c err = slb_allocate_user(mm, ea);
ea 810 arch/powerpc/mm/book3s64/slb.c preload_add(current_thread_info(), ea);
ea 816 arch/powerpc/mm/book3s64/slb.c void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
ea 820 arch/powerpc/mm/book3s64/slb.c _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
ea 822 arch/powerpc/mm/book3s64/slb.c bad_page_fault(regs, ea, SIGSEGV);
ea 23 arch/powerpc/mm/copro_fault.c int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
ea 38 arch/powerpc/mm/copro_fault.c vma = find_vma(mm, ea);
ea 42 arch/powerpc/mm/copro_fault.c if (ea < vma->vm_start) {
ea 45 arch/powerpc/mm/copro_fault.c if (expand_stack(vma, ea))
ea 67 arch/powerpc/mm/copro_fault.c *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
ea 90 arch/powerpc/mm/copro_fault.c int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
ea 95 arch/powerpc/mm/copro_fault.c switch (get_region_id(ea)) {
ea 97 arch/powerpc/mm/copro_fault.c pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
ea 100 arch/powerpc/mm/copro_fault.c psize = get_slice_psize(mm, ea);
ea 101 arch/powerpc/mm/copro_fault.c ssize = user_segment_size(ea);
ea 102 arch/powerpc/mm/copro_fault.c vsid = get_user_vsid(&mm->context, ea, ssize);
ea 106 arch/powerpc/mm/copro_fault.c pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
ea 109 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
ea 113 arch/powerpc/mm/copro_fault.c pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
ea 116 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
ea 120 arch/powerpc/mm/copro_fault.c pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
ea 123 arch/powerpc/mm/copro_fault.c vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
ea 127 arch/powerpc/mm/copro_fault.c pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
ea 139 arch/powerpc/mm/copro_fault.c slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
ea 62 arch/powerpc/mm/ioremap.c int early_ioremap_range(unsigned long ea, phys_addr_t pa,
ea 68 arch/powerpc/mm/ioremap.c int err = map_kernel_page(ea + i, pa + i, prot);
ea 10 arch/powerpc/mm/ioremap_64.c void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
ea 13 arch/powerpc/mm/ioremap_64.c unsigned long va = (unsigned long)ea;
ea 19 arch/powerpc/mm/ioremap_64.c if ((ea + size) >= (void *)IOREMAP_END) {
ea 25 arch/powerpc/mm/ioremap_64.c WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
ea 39 arch/powerpc/mm/ioremap_64.c return (void __iomem *)ea;
ea 48 arch/powerpc/mm/ioremap_64.c void __iounmap_at(void *ea, unsigned long size)
ea 50 arch/powerpc/mm/ioremap_64.c WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
ea 53 arch/powerpc/mm/ioremap_64.c unmap_kernel_range((unsigned long)ea, size);
ea 94 arch/powerpc/mm/mmu_decl.h void hash_preload(struct mm_struct *mm, unsigned long ea);
ea 101 arch/powerpc/mm/nohash/book3e_hugetlbpage.c static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
ea 113 arch/powerpc/mm/nohash/book3e_hugetlbpage.c : "=&r"(found) : "r"(ea));
ea 119 arch/powerpc/mm/nohash/book3e_hugetlbpage.c : "=&r"(found) : "r"(ea));
ea 126 arch/powerpc/mm/nohash/book3e_hugetlbpage.c book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
ea 135 arch/powerpc/mm/nohash/book3e_hugetlbpage.c if (unlikely(is_kernel_addr(ea)))
ea 151 arch/powerpc/mm/nohash/book3e_hugetlbpage.c if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
ea 162 arch/powerpc/mm/nohash/book3e_hugetlbpage.c mas2 = ea & ~((1UL << shift) - 1);
ea 73 arch/powerpc/mm/nohash/book3e_pgtable.c int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
ea 82 arch/powerpc/mm/nohash/book3e_pgtable.c pgdp = pgd_offset_k(ea);
ea 83 arch/powerpc/mm/nohash/book3e_pgtable.c pudp = pud_alloc(&init_mm, pgdp, ea);
ea 86 arch/powerpc/mm/nohash/book3e_pgtable.c pmdp = pmd_alloc(&init_mm, pudp, ea);
ea 89 arch/powerpc/mm/nohash/book3e_pgtable.c ptep = pte_alloc_kernel(pmdp, ea);
ea 93 arch/powerpc/mm/nohash/book3e_pgtable.c pgdp = pgd_offset_k(ea);
ea 100 arch/powerpc/mm/nohash/book3e_pgtable.c pudp = pud_offset(pgdp, ea);
ea 105 arch/powerpc/mm/nohash/book3e_pgtable.c pmdp = pmd_offset(pudp, ea);
ea 110 arch/powerpc/mm/nohash/book3e_pgtable.c ptep = pte_offset_kernel(pmdp, ea);
ea 112 arch/powerpc/mm/nohash/book3e_pgtable.c set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
ea 312 arch/powerpc/mm/pgtable.c pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
ea 328 arch/powerpc/mm/pgtable.c pgdp = pgdir + pgd_index(ea);
ea 355 arch/powerpc/mm/pgtable.c pudp = pud_offset(&pgd, ea);
ea 372 arch/powerpc/mm/pgtable.c pmdp = pmd_offset(&pud, ea);
ea 410 arch/powerpc/mm/pgtable.c return pte_offset_kernel(&pmd, ea);
ea 416 arch/powerpc/mm/pgtable.c ret_pte = hugepte_offset(*hpdp, ea, pdshift);
ea 184 arch/powerpc/mm/ptdump/hashpagetable.c static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
ea 189 arch/powerpc/mm/ptdump/hashpagetable.c while (ea >= st->marker[1].start_address) {
ea 193 arch/powerpc/mm/ptdump/hashpagetable.c seq_printf(st->seq, "0x%lx:\t", ea);
ea 207 arch/powerpc/mm/ptdump/hashpagetable.c static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
ea 216 arch/powerpc/mm/ptdump/hashpagetable.c vsid = get_kernel_vsid(ea, ssize);
ea 217 arch/powerpc/mm/ptdump/hashpagetable.c vpn = hpt_vpn(ea, vsid, ssize);
ea 240 arch/powerpc/mm/ptdump/hashpagetable.c static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
ea 249 arch/powerpc/mm/ptdump/hashpagetable.c vsid = get_kernel_vsid(ea, ssize);
ea 250 arch/powerpc/mm/ptdump/hashpagetable.c vpn = hpt_vpn(ea, vsid, ssize);
ea 314 arch/powerpc/mm/ptdump/hashpagetable.c static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v,
ea 318 arch/powerpc/mm/ptdump/hashpagetable.c return pseries_find(ea, psize, primary, v, r);
ea 320 arch/powerpc/mm/ptdump/hashpagetable.c return native_find(ea, psize, primary, v, r);
ea 323 arch/powerpc/mm/ptdump/hashpagetable.c static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
ea 330 arch/powerpc/mm/ptdump/hashpagetable.c if (ea < PAGE_OFFSET)
ea 334 arch/powerpc/mm/ptdump/hashpagetable.c slot = base_hpte_find(ea, psize, true, &v, &r);
ea 338 arch/powerpc/mm/ptdump/hashpagetable.c slot = base_hpte_find(ea, psize, false, &v, &r);
ea 368 arch/powerpc/mm/ptdump/hashpagetable.c dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits);
ea 151 arch/powerpc/platforms/cell/spu_base.c static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
ea 156 arch/powerpc/platforms/cell/spu_base.c ret = copro_calculate_slb(spu->mm, ea, &slb);
ea 171 arch/powerpc/platforms/cell/spu_base.c extern int hash_page(unsigned long ea, unsigned long access,
ea 173 arch/powerpc/platforms/cell/spu_base.c static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
ea 177 arch/powerpc/platforms/cell/spu_base.c pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
ea 184 arch/powerpc/platforms/cell/spu_base.c (get_region_id(ea) != USER_REGION_ID)) {
ea 187 arch/powerpc/platforms/cell/spu_base.c ret = hash_page(ea,
ea 198 arch/powerpc/platforms/cell/spu_base.c spu->class_1_dar = ea;
ea 211 arch/powerpc/platforms/cell/spu_base.c unsigned long ea = (unsigned long)addr;
ea 214 arch/powerpc/platforms/cell/spu_base.c if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
ea 219 arch/powerpc/platforms/cell/spu_base.c slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
ea 221 arch/powerpc/platforms/cell/spu_base.c slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
ea 231 arch/powerpc/platforms/cell/spu_base.c unsigned long ea = (unsigned long)new_addr;
ea 235 arch/powerpc/platforms/cell/spu_base.c if (!((slbs[i].esid ^ ea) & ESID_MASK))
ea 24 arch/powerpc/platforms/cell/spufs/fault.c unsigned long ea, int type)
ea 38 arch/powerpc/platforms/cell/spufs/fault.c force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea);
ea 88 arch/powerpc/platforms/cell/spufs/fault.c u64 ea, dsisr, access;
ea 102 arch/powerpc/platforms/cell/spufs/fault.c ea = ctx->csa.class_1_dar;
ea 110 arch/powerpc/platforms/cell/spufs/fault.c pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
ea 123 arch/powerpc/platforms/cell/spufs/fault.c ret = hash_page(ea, access, 0x300, dsisr);
ea 128 arch/powerpc/platforms/cell/spufs/fault.c ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);
ea 163 arch/powerpc/platforms/cell/spufs/fault.c spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
ea 1548 arch/powerpc/platforms/cell/spufs/file.c cmd->ea, cmd->size, cmd->tag, cmd->cmd);
ea 1563 arch/powerpc/platforms/cell/spufs/file.c if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
ea 1565 arch/powerpc/platforms/cell/spufs/file.c cmd->ea, cmd->lsa);
ea 282 arch/powerpc/platforms/cell/spufs/hw_ops.c out_be64(&prob->mfc_ea_W, cmd->ea);
ea 163 arch/powerpc/platforms/cell/spufs/spufs.h uint64_t ea; /* effective address */
ea 764 arch/powerpc/platforms/cell/spufs/switch.c static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
ea 783 arch/powerpc/platforms/cell/spufs/switch.c out_be64(&prob->mfc_ea_W, ea);
ea 792 arch/powerpc/platforms/cell/spufs/switch.c ea += transfer_size;
ea 147 arch/powerpc/platforms/ps3/htab.c static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
ea 950 arch/powerpc/platforms/pseries/lpar.c unsigned long ea,
ea 956 arch/powerpc/platforms/pseries/lpar.c vsid = get_kernel_vsid(ea, ssize);
ea 957 arch/powerpc/platforms/pseries/lpar.c vpn = hpt_vpn(ea, vsid, ssize);
ea 1242 arch/powerpc/platforms/pseries/lpar.c static int pSeries_lpar_hpte_removebolted(unsigned long ea,
ea 1248 arch/powerpc/platforms/pseries/lpar.c vsid = get_kernel_vsid(ea, ssize);
ea 1249 arch/powerpc/platforms/pseries/lpar.c vpn = hpt_vpn(ea, vsid, ssize);
ea 323 arch/sh/mm/cache-sh4.c unsigned long a, ea, p;
ea 353 arch/sh/mm/cache-sh4.c ea = base_addr + PAGE_SIZE;
ea 367 arch/sh/mm/cache-sh4.c } while (a < ea);
ea 66 arch/unicore32/include/asm/assembler.h .else; .ifc \cond, ea
ea 91 arch/x86/events/intel/pt.c struct dev_ext_attribute *ea =
ea 93 arch/x86/events/intel/pt.c enum pt_capabilities cap = (long)ea->var;
ea 249 arch/x86/include/asm/kvm_emulate.h ulong ea;
ea 715 arch/x86/kvm/emulate.c la = seg_base(ctxt, addr.seg) + addr.ea;
ea 744 arch/x86/kvm/emulate.c if (addr.ea <= lim)
ea 748 arch/x86/kvm/emulate.c if (addr.ea > lim)
ea 753 arch/x86/kvm/emulate.c *max_size = (u64)lim + 1 - addr.ea;
ea 786 arch/x86/kvm/emulate.c .ea = dst };
ea 789 arch/x86/kvm/emulate.c addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
ea 792 arch/x86/kvm/emulate.c ctxt->_eip = addr.ea;
ea 884 arch/x86/kvm/emulate.c .ea = ctxt->eip + cur_size };
ea 985 arch/x86/kvm/emulate.c addr.ea += 2;
ea 1391 arch/x86/kvm/emulate.c op->addr.mem.ea = modrm_ea;
ea 1393 arch/x86/kvm/emulate.c ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
ea 1407 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u16, ctxt);
ea 1410 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u32, ctxt);
ea 1413 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u64, ctxt);
ea 1434 arch/x86/kvm/emulate.c ctxt->dst.addr.mem.ea = address_mask(ctxt,
ea 1435 arch/x86/kvm/emulate.c ctxt->dst.addr.mem.ea + (sv >> 3));
ea 1884 arch/x86/kvm/emulate.c addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
ea 1903 arch/x86/kvm/emulate.c addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
ea 3435 arch/x86/kvm/emulate.c op->addr.mem.ea = register_address(ctxt, reg);
ea 4984 arch/x86/kvm/emulate.c op->addr.mem.ea = ctxt->_eip;
ea 5069 arch/x86/kvm/emulate.c op->addr.mem.ea =
ea 5123 arch/x86/kvm/emulate.c op->addr.mem.ea =
ea 5132 arch/x86/kvm/emulate.c op->addr.mem.ea =
ea 5141 arch/x86/kvm/emulate.c op->addr.mem.ea = ctxt->_eip;
ea 5470 arch/x86/kvm/emulate.c ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ea 5471 arch/x86/kvm/emulate.c ctxt->memopp->addr.mem.ea + ctxt->_eip);
ea 5727 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src.addr.mem.ea;
ea 1421 drivers/atm/firestream.c txq->ea = p;
ea 442 drivers/atm/firestream.h struct FS_QENTRY *sa, *ea;
ea 999 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1006 drivers/base/core.c *(unsigned long *)(ea->var) = new;
ea 1016 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1017 drivers/base/core.c return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
ea 1025 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1035 drivers/base/core.c *(int *)(ea->var) = new;
ea 1045 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1047 drivers/base/core.c return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
ea 1054 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1056 drivers/base/core.c if (strtobool(buf, ea->var) < 0)
ea 1066 drivers/base/core.c struct dev_ext_attribute *ea = to_ext_attr(attr);
ea 1068 drivers/base/core.c return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
ea 470 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c const struct nvkm_enum *er, *ee, *ec, *ea;
ea 485 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c ea = nvkm_enum_find(fifo->func->fault.access, info->access);
ea 524 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c info->access, ea ? ea->name : "", info->addr,
ea 514 drivers/hwtracing/coresight/coresight-etm-perf.c struct dev_ext_attribute *ea;
ea 516 drivers/hwtracing/coresight/coresight-etm-perf.c ea = container_of(dattr, struct dev_ext_attribute, attr);
ea 517 drivers/hwtracing/coresight/coresight-etm-perf.c return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
ea 527 drivers/hwtracing/coresight/coresight-etm-perf.c struct dev_ext_attribute *ea;
ea 533 drivers/hwtracing/coresight/coresight-etm-perf.c if (csdev->ea != NULL)
ea 539 drivers/hwtracing/coresight/coresight-etm-perf.c ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
ea 540 drivers/hwtracing/coresight/coresight-etm-perf.c if (!ea)
ea 547 drivers/hwtracing/coresight/coresight-etm-perf.c sysfs_attr_init(&ea->attr.attr);
ea 548 drivers/hwtracing/coresight/coresight-etm-perf.c ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
ea 549 drivers/hwtracing/coresight/coresight-etm-perf.c if (!ea->attr.attr.name)
ea 552 drivers/hwtracing/coresight/coresight-etm-perf.c ea->attr.attr.mode = 0444;
ea 553 drivers/hwtracing/coresight/coresight-etm-perf.c ea->attr.show = etm_perf_sink_name_show;
ea 554 drivers/hwtracing/coresight/coresight-etm-perf.c ea->var = (unsigned long *)hash;
ea 557 drivers/hwtracing/coresight/coresight-etm-perf.c &ea->attr.attr, "sinks");
ea 560 drivers/hwtracing/coresight/coresight-etm-perf.c csdev->ea = ea;
ea 568 drivers/hwtracing/coresight/coresight-etm-perf.c struct dev_ext_attribute *ea = csdev->ea;
ea 574 drivers/hwtracing/coresight/coresight-etm-perf.c if (!ea)
ea 578 drivers/hwtracing/coresight/coresight-etm-perf.c &ea->attr.attr, "sinks");
ea 579 drivers/hwtracing/coresight/coresight-etm-perf.c csdev->ea = NULL;
ea 538 drivers/hwtracing/coresight/coresight.c if (!csdev->ea)
ea 544 drivers/hwtracing/coresight/coresight.c hash = (unsigned long)csdev->ea->var;
ea 698 drivers/md/dm-cache-policy-smq.c static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
ea 703 drivers/md/dm-cache-policy-smq.c ea->es = es;
ea 704 drivers/md/dm-cache-policy-smq.c ea->nr_allocated = 0u;
ea 705 drivers/md/dm-cache-policy-smq.c ea->begin = begin;
ea 707 drivers/md/dm-cache-policy-smq.c l_init(&ea->free);
ea 709 drivers/md/dm-cache-policy-smq.c l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
ea 728 drivers/md/dm-cache-policy-smq.c static struct entry *alloc_entry(struct entry_alloc *ea)
ea 732 drivers/md/dm-cache-policy-smq.c if (l_empty(&ea->free))
ea 735 drivers/md/dm-cache-policy-smq.c e = l_pop_head(ea->es, &ea->free);
ea 737 drivers/md/dm-cache-policy-smq.c ea->nr_allocated++;
ea 745 drivers/md/dm-cache-policy-smq.c static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
ea 747 drivers/md/dm-cache-policy-smq.c struct entry *e = __get_entry(ea->es, ea->begin + i);
ea 751 drivers/md/dm-cache-policy-smq.c l_del(ea->es, &ea->free, e);
ea 753 drivers/md/dm-cache-policy-smq.c ea->nr_allocated++;
ea 758 drivers/md/dm-cache-policy-smq.c static void free_entry(struct entry_alloc *ea, struct entry *e)
ea 760 drivers/md/dm-cache-policy-smq.c BUG_ON(!ea->nr_allocated);
ea 763 drivers/md/dm-cache-policy-smq.c ea->nr_allocated--;
ea 765 drivers/md/dm-cache-policy-smq.c l_add_tail(ea->es, &ea->free, e);
ea 768 drivers/md/dm-cache-policy-smq.c static bool allocator_empty(struct entry_alloc *ea)
ea 770 drivers/md/dm-cache-policy-smq.c return l_empty(&ea->free);
ea 773 drivers/md/dm-cache-policy-smq.c static unsigned get_index(struct entry_alloc *ea, struct entry *e)
ea 775
drivers/md/dm-cache-policy-smq.c return to_index(ea->es, e) - ea->begin; ea 778 drivers/md/dm-cache-policy-smq.c static struct entry *get_entry(struct entry_alloc *ea, unsigned index) ea 780 drivers/md/dm-cache-policy-smq.c return __get_entry(ea->es, ea->begin + index); ea 862 drivers/md/dm-cache-policy-smq.c static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which) ea 864 drivers/md/dm-cache-policy-smq.c return get_entry(ea, which ? level : NR_CACHE_LEVELS + level); ea 85 drivers/misc/cxl/fault.c u64 ea) ea 90 drivers/misc/cxl/fault.c if (!(rc = copro_calculate_slb(mm, ea, &slb))) { ea 113 drivers/misc/cxl/fault.c struct mm_struct *mm, u64 ea) ea 117 drivers/misc/cxl/fault.c pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); ea 118 drivers/misc/cxl/fault.c trace_cxl_ste_miss(ctx, ea); ea 120 drivers/misc/cxl/fault.c if ((rc = cxl_fault_segment(ctx, mm, ea))) ea 283 drivers/misc/cxl/fault.c static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) ea 294 drivers/misc/cxl/fault.c cxl_fault_segment(ctx, mm, ea); ea 299 drivers/misc/cxl/fault.c static u64 next_segment(u64 ea, u64 vsid) ea 302 drivers/misc/cxl/fault.c ea |= (1ULL << 40) - 1; ea 304 drivers/misc/cxl/fault.c ea |= (1ULL << 28) - 1; ea 306 drivers/misc/cxl/fault.c return ea + 1; ea 311 drivers/misc/cxl/fault.c u64 ea, last_esid = 0; ea 326 drivers/misc/cxl/fault.c for (ea = vma->vm_start; ea < vma->vm_end; ea 327 drivers/misc/cxl/fault.c ea = next_segment(ea, slb.vsid)) { ea 328 drivers/misc/cxl/fault.c rc = copro_calculate_slb(mm, ea, &slb); ea 1901 drivers/mmc/core/mmc_test.c unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; ea 1918 drivers/mmc/core/mmc_test.c ea = mmc_test_rnd_num(range1); ea 1919 drivers/mmc/core/mmc_test.c if (ea == last_ea) ea 1920 drivers/mmc/core/mmc_test.c ea -= 1; ea 1921 drivers/mmc/core/mmc_test.c last_ea = ea; ea 1922 drivers/mmc/core/mmc_test.c dev_addr = rnd_addr + test->card->pref_erase * ea + ea 1180 drivers/net/ethernet/apple/bmac.c bmac_get_station_address(struct net_device *dev, unsigned char *ea) ea 1189 drivers/net/ethernet/apple/bmac.c ea[2*i] = bitrev8(data & 0x0ff); ea 1190 drivers/net/ethernet/apple/bmac.c ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); ea 554 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c struct mlx5e_l2_table *ea = &priv->fs.l2; ea 559 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ea->allmulti_enabled, ea 560 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ea->promisc_enabled); ea 598 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c struct mlx5e_l2_table *ea = &priv->fs.l2; ea 606 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool enable_promisc = !ea->promisc_enabled && promisc_enabled; ea 607 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool disable_promisc = ea->promisc_enabled && !promisc_enabled; ea 608 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; ea 609 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; ea 610 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; ea 611 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; ea 617 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); ea 622 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_add_l2_flow_rule(priv, &ea->allmulti, 
MLX5E_ALLMULTI); ea 624 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); ea 629 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_del_l2_flow_rule(priv, &ea->broadcast); ea 631 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_del_l2_flow_rule(priv, &ea->allmulti); ea 635 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c mlx5e_del_l2_flow_rule(priv, &ea->promisc); ea 638 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ea->promisc_enabled = promisc_enabled; ea 639 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ea->allmulti_enabled = allmulti_enabled; ea 640 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ea->broadcast_enabled = broadcast_enabled; ea 462 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy(key_le->ea, key->ea, sizeof(key->ea)); ea 2119 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); ea 2323 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy((char *)&key->ea, (void *)mac_addr, ETH_ALEN); ea 4797 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy(&scbval.ea, params->mac, ETH_ALEN); ea 5145 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c memcpy(info.ea, peer, ETH_ALEN); ea 252 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h u8 ea[ETH_ALEN]; /* Station address */ ea 483 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h u8 ea[ETH_ALEN]; /* per station */ ea 505 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h u8 ea[ETH_ALEN]; /* per station */ ea 524 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h u8 ea[ETH_ALEN]; ea 540 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h u8 ea[ETH_ALEN]; /* Station address */ ea 397 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c u8 ea[ETH_ALEN]; ea 731 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c memcpy(&desc->ea[0], addr, ETH_ALEN); ea 738 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id); ea 746 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea) ea 751 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (ea == NULL) ea 756 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN)) ea 2022 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c struct brcmf_if *ifp, u8 ea[ETH_ALEN], ea 2033 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c memcpy(if_request.addr, ea, ETH_ALEN); ea 69 drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h u8 ea[ETH_ALEN]; /* station address */ ea 3019 drivers/pci/pci.c int ea; ea 3025 drivers/pci/pci.c ea = pci_find_capability(dev, PCI_CAP_ID_EA); ea 3026 drivers/pci/pci.c if (!ea) ea 3030 drivers/pci/pci.c pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT, ea 3034 drivers/pci/pci.c offset = ea + PCI_EA_FIRST_ENT; ea 1099 drivers/pci/probe.c int ea, offset; ea 1107 drivers/pci/probe.c ea = pci_find_capability(dev, PCI_CAP_ID_EA); ea 1108 drivers/pci/probe.c if (!ea) ea 1111 drivers/pci/probe.c offset = ea + PCI_EA_FIRST_ENT; ea 221 drivers/perf/arm-ccn.c struct dev_ext_attribute *ea = container_of(attr, ea 224 drivers/perf/arm-ccn.c return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); ea 125 drivers/perf/arm_spe_pmu.c struct dev_ext_attribute *ea = ea 127 drivers/perf/arm_spe_pmu.c int cap = (long)ea->var; ea 
101 drivers/scsi/qla2xxx/qla_gbl.h void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea); ea 103 drivers/scsi/qla2xxx/qla_gbl.h struct event_arg *ea); ea 105 drivers/scsi/qla2xxx/qla_gbl.h struct event_arg *ea); ea 675 drivers/scsi/qla2xxx/qla_gbl.h void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea); ea 2965 drivers/scsi/qla2xxx/qla_gs.c void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 2967 drivers/scsi/qla2xxx/qla_gs.c struct fc_port *fcport = ea->fcport; ea 2972 drivers/scsi/qla2xxx/qla_gs.c fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, ea 2973 drivers/scsi/qla2xxx/qla_gs.c ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id); ea 2978 drivers/scsi/qla2xxx/qla_gs.c if (ea->sp->gen2 != fcport->login_gen) { ea 2984 drivers/scsi/qla2xxx/qla_gs.c } else if (ea->sp->gen1 != fcport->rscn_gen) { ea 2997 drivers/scsi/qla2xxx/qla_gs.c struct event_arg ea; ea 3033 drivers/scsi/qla2xxx/qla_gs.c memset(&ea, 0, sizeof(ea)); ea 3034 drivers/scsi/qla2xxx/qla_gs.c ea.rc = res; ea 3035 drivers/scsi/qla2xxx/qla_gs.c ea.fcport = fcport; ea 3036 drivers/scsi/qla2xxx/qla_gs.c ea.sp = sp; ea 3037 drivers/scsi/qla2xxx/qla_gs.c qla24xx_handle_gpsc_event(vha, &ea); ea 3148 drivers/scsi/qla2xxx/qla_gs.c void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 3155 drivers/scsi/qla2xxx/qla_gs.c __func__, __LINE__, ea->id.b24); ea 3157 drivers/scsi/qla2xxx/qla_gs.c if (ea->rc) { ea 3160 drivers/scsi/qla2xxx/qla_gs.c if (fcport->d_id.b24 == ea->id.b24) ea 3167 drivers/scsi/qla2xxx/qla_gs.c fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); ea 3171 drivers/scsi/qla2xxx/qla_gs.c if ((conflict->d_id.b24 == ea->id.b24) && ea 3210 drivers/scsi/qla2xxx/qla_gs.c fcport->d_id = ea->id; ea 3214 drivers/scsi/qla2xxx/qla_gs.c fcport->d_id = ea->id; ea 3217 drivers/scsi/qla2xxx/qla_gs.c fcport->d_id = ea->id; ea 3223 drivers/scsi/qla2xxx/qla_gs.c if (conflict->d_id.b24 == ea->id.b24) { ea 3242 drivers/scsi/qla2xxx/qla_gs.c __func__, __LINE__, ea->port_name); ea 3243 drivers/scsi/qla2xxx/qla_gs.c qla24xx_post_newsess_work(vha, &ea->id, ea 3244 drivers/scsi/qla2xxx/qla_gs.c ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN); ea 3256 drivers/scsi/qla2xxx/qla_gs.c struct event_arg ea; ea 3271 drivers/scsi/qla2xxx/qla_gs.c memset(&ea, 0, sizeof(ea)); ea 3272 drivers/scsi/qla2xxx/qla_gs.c memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); ea 3273 drivers/scsi/qla2xxx/qla_gs.c ea.sp = sp; ea 3274 drivers/scsi/qla2xxx/qla_gs.c ea.id = be_to_port_id(ct_req->req.port_id.port_id); ea 3275 drivers/scsi/qla2xxx/qla_gs.c ea.rc = res; ea 3283 drivers/scsi/qla2xxx/qla_gs.c qla24xx_post_gpnid_work(sp->vha, &ea.id); ea 3289 drivers/scsi/qla2xxx/qla_gs.c qla24xx_post_gpnid_work(sp->vha, &ea.id); ea 3294 drivers/scsi/qla2xxx/qla_gs.c qla24xx_handle_gpnid_event(vha, &ea); ea 3425 drivers/scsi/qla2xxx/qla_gs.c void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 3427 drivers/scsi/qla2xxx/qla_gs.c fc_port_t *fcport = ea->fcport; ea 3437 drivers/scsi/qla2xxx/qla_gs.c struct event_arg ea; ea 3466 drivers/scsi/qla2xxx/qla_gs.c memset(&ea, 0, sizeof(ea)); ea 3467 drivers/scsi/qla2xxx/qla_gs.c ea.sp = sp; ea 3468 drivers/scsi/qla2xxx/qla_gs.c ea.fcport = sp->fcport; ea 3469 drivers/scsi/qla2xxx/qla_gs.c ea.rc = res; ea 3471 drivers/scsi/qla2xxx/qla_gs.c qla24xx_handle_gffid_event(vha, &ea); ea 4230 drivers/scsi/qla2xxx/qla_gs.c void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg 
*ea) ea 4232 drivers/scsi/qla2xxx/qla_gs.c qla24xx_post_gnl_work(vha, ea->fcport); ea 4240 drivers/scsi/qla2xxx/qla_gs.c struct event_arg ea; ea 4248 drivers/scsi/qla2xxx/qla_gs.c memset(&ea, 0, sizeof(ea)); ea 4249 drivers/scsi/qla2xxx/qla_gs.c ea.fcport = fcport; ea 4250 drivers/scsi/qla2xxx/qla_gs.c ea.sp = sp; ea 4251 drivers/scsi/qla2xxx/qla_gs.c ea.rc = res; ea 4257 drivers/scsi/qla2xxx/qla_gs.c qla24xx_handle_gnnid_event(vha, &ea); ea 4340 drivers/scsi/qla2xxx/qla_gs.c void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 4342 drivers/scsi/qla2xxx/qla_gs.c fc_port_t *fcport = ea->fcport; ea 4347 drivers/scsi/qla2xxx/qla_gs.c fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, ea 4348 drivers/scsi/qla2xxx/qla_gs.c fcport->rscn_gen, ea->sp->gen1, vha->fcport_count); ea 4353 drivers/scsi/qla2xxx/qla_gs.c if (ea->sp->gen2 != fcport->login_gen) { ea 4359 drivers/scsi/qla2xxx/qla_gs.c } else if (ea->sp->gen1 != fcport->rscn_gen) { ea 4371 drivers/scsi/qla2xxx/qla_gs.c struct event_arg ea; ea 4378 drivers/scsi/qla2xxx/qla_gs.c memset(&ea, 0, sizeof(ea)); ea 4379 drivers/scsi/qla2xxx/qla_gs.c ea.fcport = fcport; ea 4380 drivers/scsi/qla2xxx/qla_gs.c ea.sp = sp; ea 4381 drivers/scsi/qla2xxx/qla_gs.c ea.rc = res; ea 4387 drivers/scsi/qla2xxx/qla_gs.c qla24xx_handle_gfpnid_event(vha, &ea); ea 41 drivers/scsi/qla2xxx/qla_init.c struct event_arg *ea); ea 273 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 281 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 282 drivers/scsi/qla2xxx/qla_init.c ea.fcport = sp->fcport; ea 283 drivers/scsi/qla2xxx/qla_init.c ea.data[0] = lio->u.logio.data[0]; ea 284 drivers/scsi/qla2xxx/qla_init.c ea.data[1] = lio->u.logio.data[1]; ea 285 drivers/scsi/qla2xxx/qla_init.c ea.iop[0] = lio->u.logio.iop[0]; ea 286 drivers/scsi/qla2xxx/qla_init.c ea.iop[1] = lio->u.logio.iop[1]; ea 287 drivers/scsi/qla2xxx/qla_init.c ea.sp = sp; ea 288 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_plogi_done_event(vha, &ea); ea 485 drivers/scsi/qla2xxx/qla_init.c void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 487 drivers/scsi/qla2xxx/qla_init.c struct fc_port *fcport = ea->fcport; ea 492 drivers/scsi/qla2xxx/qla_init.c fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, ea 493 drivers/scsi/qla2xxx/qla_init.c fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); ea 495 drivers/scsi/qla2xxx/qla_init.c WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea 496 drivers/scsi/qla2xxx/qla_init.c ea->data[0]); ea 498 drivers/scsi/qla2xxx/qla_init.c if (ea->data[0] != MBS_COMMAND_COMPLETE) { ea 501 drivers/scsi/qla2xxx/qla_init.c __func__, ea->fcport->port_name); ea 505 drivers/scsi/qla2xxx/qla_init.c qlt_schedule_sess_for_deletion(ea->fcport); ea 509 drivers/scsi/qla2xxx/qla_init.c if (ea->fcport->disc_state == DSC_DELETE_PEND) ea 512 drivers/scsi/qla2xxx/qla_init.c if (ea->sp->gen2 != ea->fcport->login_gen) { ea 516 drivers/scsi/qla2xxx/qla_init.c __func__, ea->fcport->port_name); ea 518 drivers/scsi/qla2xxx/qla_init.c } else if (ea->sp->gen1 != ea->fcport->rscn_gen) { ea 524 drivers/scsi/qla2xxx/qla_init.c __qla24xx_handle_gpdb_event(vha, ea); ea 544 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 553 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 554 drivers/scsi/qla2xxx/qla_init.c ea.rc = res; ea 555 drivers/scsi/qla2xxx/qla_init.c ea.data[0] = lio->u.logio.data[0]; ea 556 drivers/scsi/qla2xxx/qla_init.c ea.data[1] = lio->u.logio.data[1]; ea 557 
drivers/scsi/qla2xxx/qla_init.c ea.iop[0] = lio->u.logio.iop[0]; ea 558 drivers/scsi/qla2xxx/qla_init.c ea.iop[1] = lio->u.logio.iop[1]; ea 559 drivers/scsi/qla2xxx/qla_init.c ea.fcport = sp->fcport; ea 560 drivers/scsi/qla2xxx/qla_init.c ea.sp = sp; ea 562 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_adisc_event(vha, &ea); ea 681 drivers/scsi/qla2xxx/qla_init.c struct event_arg *ea) ea 691 drivers/scsi/qla2xxx/qla_init.c fcport = ea->fcport; ea 695 drivers/scsi/qla2xxx/qla_init.c fcport->fw_login_state, ea->rc, ea 702 drivers/scsi/qla2xxx/qla_init.c if (ea->rc) { /* rval */ ea 722 drivers/scsi/qla2xxx/qla_init.c n = ea->data[0] / sizeof(struct get_name_list_extended); ea 971 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 986 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 987 drivers/scsi/qla2xxx/qla_init.c ea.sp = sp; ea 988 drivers/scsi/qla2xxx/qla_init.c ea.rc = res; ea 994 drivers/scsi/qla2xxx/qla_init.c ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */ ea 1025 drivers/scsi/qla2xxx/qla_init.c ea.fcport = fcport; ea 1027 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_gnl_done_event(vha, &ea); ea 1158 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 1169 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 1170 drivers/scsi/qla2xxx/qla_init.c ea.fcport = fcport; ea 1171 drivers/scsi/qla2xxx/qla_init.c ea.sp = sp; ea 1173 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_gpdb_event(vha, &ea); ea 1199 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 1208 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 1209 drivers/scsi/qla2xxx/qla_init.c ea.fcport = sp->fcport; ea 1210 drivers/scsi/qla2xxx/qla_init.c ea.data[0] = lio->u.logio.data[0]; ea 1211 drivers/scsi/qla2xxx/qla_init.c ea.data[1] = lio->u.logio.data[1]; ea 1212 drivers/scsi/qla2xxx/qla_init.c ea.iop[0] = lio->u.logio.iop[0]; ea 1213 drivers/scsi/qla2xxx/qla_init.c ea.iop[1] = lio->u.logio.iop[1]; ea 1214 drivers/scsi/qla2xxx/qla_init.c ea.sp = sp; ea 1216 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_prli_done_event(vha, &ea); ea 1374 drivers/scsi/qla2xxx/qla_init.c void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 1379 drivers/scsi/qla2xxx/qla_init.c ea->fcport->login_gen++; ea 1380 drivers/scsi/qla2xxx/qla_init.c ea->fcport->deleted = 0; ea 1381 drivers/scsi/qla2xxx/qla_init.c ea->fcport->logout_on_delete = 1; ea 1383 drivers/scsi/qla2xxx/qla_init.c if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { ea 1385 drivers/scsi/qla2xxx/qla_init.c ea->fcport->login_succ = 1; ea 1388 drivers/scsi/qla2xxx/qla_init.c qla24xx_sched_upd_fcport(ea->fcport); ea 1390 drivers/scsi/qla2xxx/qla_init.c } else if (ea->fcport->login_succ) { ea 1398 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name); ea 1399 drivers/scsi/qla2xxx/qla_init.c ea->fcport->disc_state = DSC_LOGIN_COMPLETE; ea 1405 drivers/scsi/qla2xxx/qla_init.c void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ea 1407 drivers/scsi/qla2xxx/qla_init.c fc_port_t *fcport = ea->fcport; ea 1409 drivers/scsi/qla2xxx/qla_init.c struct srb *sp = ea->sp; ea 1419 drivers/scsi/qla2xxx/qla_init.c ea->rc); ea 1429 drivers/scsi/qla2xxx/qla_init.c if (ea->sp->gen2 != fcport->login_gen) { ea 1436 drivers/scsi/qla2xxx/qla_init.c } else if (ea->sp->gen1 != fcport->rscn_gen) { ea 1465 drivers/scsi/qla2xxx/qla_init.c __qla24xx_handle_gpdb_event(vha, ea); ea 1693 drivers/scsi/qla2xxx/qla_init.c void qla2x00_handle_rscn(scsi_qla_host_t *vha, 
struct event_arg *ea) ea 1698 drivers/scsi/qla2xxx/qla_init.c fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); ea 1714 drivers/scsi/qla2xxx/qla_init.c struct event_arg *ea) ea 1716 drivers/scsi/qla2xxx/qla_init.c fc_port_t *fcport = ea->fcport; ea 1746 drivers/scsi/qla2xxx/qla_init.c struct event_arg ea; ea 1756 drivers/scsi/qla2xxx/qla_init.c memset(&ea, 0, sizeof(ea)); ea 1757 drivers/scsi/qla2xxx/qla_init.c ea.id = fcport->d_id; ea 1758 drivers/scsi/qla2xxx/qla_init.c ea.id.b.rsvd_1 = RSCN_PORT_ADDR; ea 1759 drivers/scsi/qla2xxx/qla_init.c qla2x00_handle_rscn(fcport->vha, &ea); ea 1870 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea 1872 drivers/scsi/qla2xxx/qla_init.c WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea 1873 drivers/scsi/qla2xxx/qla_init.c ea->data[0]); ea 1875 drivers/scsi/qla2xxx/qla_init.c switch (ea->data[0]) { ea 1879 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name); ea 1881 drivers/scsi/qla2xxx/qla_init.c ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea 1882 drivers/scsi/qla2xxx/qla_init.c ea->fcport->logout_on_delete = 1; ea 1883 drivers/scsi/qla2xxx/qla_init.c ea->fcport->nvme_prli_service_param = ea->iop[0]; ea 1884 drivers/scsi/qla2xxx/qla_init.c if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST) ea 1885 drivers/scsi/qla2xxx/qla_init.c ea->fcport->nvme_first_burst_size = ea 1886 drivers/scsi/qla2xxx/qla_init.c (ea->iop[1] & 0xffff) * 512; ea 1888 drivers/scsi/qla2xxx/qla_init.c ea->fcport->nvme_first_burst_size = 0; ea 1889 drivers/scsi/qla2xxx/qla_init.c qla24xx_post_gpdb_work(vha, ea->fcport, 0); ea 1892 drivers/scsi/qla2xxx/qla_init.c if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) && ea 1893 drivers/scsi/qla2xxx/qla_init.c (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */ ea 1895 drivers/scsi/qla2xxx/qla_init.c ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; ea 1899 drivers/scsi/qla2xxx/qla_init.c if (ea->fcport->fc4f_nvme) { ea 1902 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name); ea 1903 drivers/scsi/qla2xxx/qla_init.c ea->fcport->fc4f_nvme = 0; ea 1904 drivers/scsi/qla2xxx/qla_init.c qla24xx_post_prli_work(vha, ea->fcport); ea 1910 drivers/scsi/qla2xxx/qla_init.c if (ea->fcport->n2n_link_reset_cnt < 3) { ea 1911 drivers/scsi/qla2xxx/qla_init.c ea->fcport->n2n_link_reset_cnt++; ea 1920 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name); ea 1927 drivers/scsi/qla2xxx/qla_init.c ea->fcport->flags &= ~FCF_ASYNC_SENT; ea 1928 drivers/scsi/qla2xxx/qla_init.c ea->fcport->keep_nport_handle = 0; ea 1929 drivers/scsi/qla2xxx/qla_init.c qlt_schedule_sess_for_deletion(ea->fcport); ea 1936 drivers/scsi/qla2xxx/qla_init.c qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea 1942 drivers/scsi/qla2xxx/qla_init.c struct fc_port *fcport = ea->fcport; ea 1947 drivers/scsi/qla2xxx/qla_init.c fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, ea 1948 drivers/scsi/qla2xxx/qla_init.c ea->sp->gen1, fcport->rscn_gen, ea 1949 drivers/scsi/qla2xxx/qla_init.c ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); ea 1965 drivers/scsi/qla2xxx/qla_init.c if (ea->sp->gen2 != fcport->login_gen) { ea 1972 drivers/scsi/qla2xxx/qla_init.c } else if (ea->sp->gen1 != fcport->rscn_gen) { ea 1981 drivers/scsi/qla2xxx/qla_init.c WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea 1982 drivers/scsi/qla2xxx/qla_init.c ea->data[0]); ea 1984 drivers/scsi/qla2xxx/qla_init.c switch 
(ea->data[0]) { ea 1991 drivers/scsi/qla2xxx/qla_init.c if (ea->fcport->fc4f_nvme) { ea 1994 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name); ea 1995 drivers/scsi/qla2xxx/qla_init.c qla24xx_post_prli_work(vha, ea->fcport); ea 1999 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name, ea 2000 drivers/scsi/qla2xxx/qla_init.c ea->fcport->loop_id, ea->fcport->d_id.b24); ea 2002 drivers/scsi/qla2xxx/qla_init.c set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); ea 2004 drivers/scsi/qla2xxx/qla_init.c ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea 2005 drivers/scsi/qla2xxx/qla_init.c ea->fcport->logout_on_delete = 1; ea 2006 drivers/scsi/qla2xxx/qla_init.c ea->fcport->send_els_logo = 0; ea 2007 drivers/scsi/qla2xxx/qla_init.c ea->fcport->fw_login_state = DSC_LS_PRLI_COMP; ea 2010 drivers/scsi/qla2xxx/qla_init.c qla24xx_post_gpdb_work(vha, ea->fcport, 0); ea 2015 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name, ea->data[1]); ea 2017 drivers/scsi/qla2xxx/qla_init.c ea->fcport->flags &= ~FCF_ASYNC_SENT; ea 2018 drivers/scsi/qla2xxx/qla_init.c ea->fcport->disc_state = DSC_LOGIN_FAILED; ea 2019 drivers/scsi/qla2xxx/qla_init.c if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) ea 2022 drivers/scsi/qla2xxx/qla_init.c qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); ea 2026 drivers/scsi/qla2xxx/qla_init.c cid.b.domain = (ea->iop[1] >> 16) & 0xff; ea 2027 drivers/scsi/qla2xxx/qla_init.c cid.b.area = (ea->iop[1] >> 8) & 0xff; ea 2028 drivers/scsi/qla2xxx/qla_init.c cid.b.al_pa = ea->iop[1] & 0xff; ea 2033 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name, ea 2034 drivers/scsi/qla2xxx/qla_init.c ea->fcport->loop_id, cid.b24); ea 2036 drivers/scsi/qla2xxx/qla_init.c set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); ea 2037 drivers/scsi/qla2xxx/qla_init.c ea->fcport->loop_id = FC_NO_LOOP_ID; ea 2038 drivers/scsi/qla2xxx/qla_init.c qla24xx_post_gnl_work(vha, ea->fcport); ea 2041 drivers/scsi/qla2xxx/qla_init.c lid = ea->iop[1] & 0xffff; ea 2043 drivers/scsi/qla2xxx/qla_init.c wwn_to_u64(ea->fcport->port_name), ea 2044 drivers/scsi/qla2xxx/qla_init.c ea->fcport->d_id, lid, &conflict_fcport); ea 2052 drivers/scsi/qla2xxx/qla_init.c conflict_fcport->conflict = ea->fcport; ea 2053 drivers/scsi/qla2xxx/qla_init.c ea->fcport->login_pause = 1; ea 2057 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name, ea 2058 drivers/scsi/qla2xxx/qla_init.c ea->fcport->d_id.b24, lid); ea 2062 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, ea->fcport->port_name, ea 2063 drivers/scsi/qla2xxx/qla_init.c ea->fcport->d_id.b24, lid); ea 2065 drivers/scsi/qla2xxx/qla_init.c qla2x00_clear_loop_id(ea->fcport); ea 2067 drivers/scsi/qla2xxx/qla_init.c ea->fcport->loop_id = lid; ea 2068 drivers/scsi/qla2xxx/qla_init.c ea->fcport->keep_nport_handle = 0; ea 2069 drivers/scsi/qla2xxx/qla_init.c qlt_schedule_sess_for_deletion(ea->fcport); ea 2750 drivers/scsi/qla2xxx/qla_iocb.c struct event_arg ea; ea 2766 drivers/scsi/qla2xxx/qla_iocb.c memset(&ea, 0, sizeof(ea)); ea 2767 drivers/scsi/qla2xxx/qla_iocb.c ea.fcport = fcport; ea 2768 drivers/scsi/qla2xxx/qla_iocb.c ea.data[0] = MBS_COMMAND_COMPLETE; ea 2769 drivers/scsi/qla2xxx/qla_iocb.c ea.sp = sp; ea 2770 drivers/scsi/qla2xxx/qla_iocb.c qla24xx_handle_plogi_done_event(vha, &ea); ea 1114 drivers/scsi/qla2xxx/qla_isr.c struct event_arg ea; ea 1116 drivers/scsi/qla2xxx/qla_isr.c memset(&ea, 0, sizeof(ea)); ea 1117 drivers/scsi/qla2xxx/qla_isr.c ea.id.b24 = rscn_entry; 
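
The qla2xxx entries above repeat a single completion idiom: each async callback zeroes a stack-allocated struct event_arg, fills in the fcport, srb and status, and hands it to a per-command handler. A minimal self-contained mock of that shape (all types and names below are illustrative stand-ins, not the driver's own definitions):

    #include <stdio.h>
    #include <string.h>

    struct event_arg {              /* stand-in for the driver's struct */
            int rc;                 /* completion status */
            unsigned int data[2];   /* mailbox/logio payload words */
            void *fcport, *sp;      /* port and request being completed */
    };

    static void handle_example_event(struct event_arg *ea)
    {
            printf("rc=%d data0=%#x\n", ea->rc, ea->data[0]);
    }

    static void example_async_done(void *sp, int res)
    {
            struct event_arg ea;

            memset(&ea, 0, sizeof(ea)); /* zero every field first, as the entries show */
            ea.rc = res;
            ea.data[0] = 0x4000;        /* e.g. a "command complete" status word */
            ea.sp = sp;
            handle_example_event(&ea);
    }

    int main(void)
    {
            example_async_done(NULL, 0);
            return 0;
    }

The unconditional memset is the load-bearing part of the idiom: handlers read several optional fields (iop[], data[], id), and zeroing the struct keeps the ones a given caller does not set from carrying stack garbage.
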
ea 1118 drivers/scsi/qla2xxx/qla_isr.c ea.id.b.rsvd_1 = rscn_entry >> 24; ea 1119 drivers/scsi/qla2xxx/qla_isr.c qla2x00_handle_rscn(vha, &ea); ea 5352 drivers/scsi/qla2xxx/qla_os.c struct event_arg ea; ea 5370 drivers/scsi/qla2xxx/qla_os.c memset(&ea, 0, sizeof(ea)); ea 5371 drivers/scsi/qla2xxx/qla_os.c ea.fcport = fcport; ea 5372 drivers/scsi/qla2xxx/qla_os.c qla24xx_handle_relogin_event(vha, &ea); ea 452 drivers/slimbus/qcom-ctrl.c struct slim_eaddr ea; ea 455 drivers/slimbus/qcom-ctrl.c ea.manf_id = be16_to_cpup((__be16 *)&buf[2]); ea 456 drivers/slimbus/qcom-ctrl.c ea.prod_code = be16_to_cpup((__be16 *)&buf[4]); ea 457 drivers/slimbus/qcom-ctrl.c ea.dev_index = buf[6]; ea 458 drivers/slimbus/qcom-ctrl.c ea.instance = buf[7]; ea 460 drivers/slimbus/qcom-ctrl.c ret = slim_device_report_present(&ctrl->ctrl, &ea, ea 1001 drivers/slimbus/qcom-ngd-ctrl.c struct slim_eaddr *ea, u8 *laddr) ea 1027 drivers/slimbus/qcom-ngd-ctrl.c memcpy(&wbuf[1], ea, sizeof(*ea)); ea 417 drivers/slimbus/slimbus.h struct slim_eaddr *ea, u8 laddr); ea 419 drivers/slimbus/slimbus.h struct slim_eaddr *ea, u8 *laddr); ea 1093 fs/cifs/smb2ops.c struct smb2_file_full_ea_info *ea = NULL; ea 1181 fs/cifs/smb2ops.c len = sizeof(ea) + ea_name_len + ea_value_len + 1; ea 1182 fs/cifs/smb2ops.c ea = kzalloc(len, GFP_KERNEL); ea 1183 fs/cifs/smb2ops.c if (ea == NULL) { ea 1188 fs/cifs/smb2ops.c ea->ea_name_length = ea_name_len; ea 1189 fs/cifs/smb2ops.c ea->ea_value_length = cpu_to_le16(ea_value_len); ea 1190 fs/cifs/smb2ops.c memcpy(ea->ea_data, ea_name, ea_name_len + 1); ea 1191 fs/cifs/smb2ops.c memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len); ea 1194 fs/cifs/smb2ops.c data[0] = ea; ea 1216 fs/cifs/smb2ops.c kfree(ea); ea 434 fs/gfs2/inode.c struct gfs2_ea_header *ea; ea 441 fs/gfs2/inode.c ea = GFS2_EA_BH2FIRST(bh); ea 442 fs/gfs2/inode.c ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize); ea 443 fs/gfs2/inode.c ea->ea_type = GFS2_EATYPE_UNUSED; ea 444 fs/gfs2/inode.c ea->ea_flags = GFS2_EAFLAG_LAST; ea 74 fs/gfs2/xattr.c struct gfs2_ea_header *ea, ea 80 fs/gfs2/xattr.c struct gfs2_ea_header *ea, *prev = NULL; ea 86 fs/gfs2/xattr.c for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) { ea 87 fs/gfs2/xattr.c if (!GFS2_EA_REC_LEN(ea)) ea 89 fs/gfs2/xattr.c if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= ea 92 fs/gfs2/xattr.c if (!GFS2_EATYPE_VALID(ea->ea_type)) ea 95 fs/gfs2/xattr.c error = ea_call(ip, bh, ea, prev, data); ea 99 fs/gfs2/xattr.c if (GFS2_EA_IS_LAST(ea)) { ea 100 fs/gfs2/xattr.c if ((char *)GFS2_EA2NEXT(ea) != ea 165 fs/gfs2/xattr.c struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, ea 170 fs/gfs2/xattr.c if (ea->ea_type == GFS2_EATYPE_UNUSED) ea 173 fs/gfs2/xattr.c if (ea->ea_type == ef->type) { ea 174 fs/gfs2/xattr.c if (ea->ea_name_len == ef->namel && ea 175 fs/gfs2/xattr.c !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) { ea 179 fs/gfs2/xattr.c el->el_ea = ea; ea 224 fs/gfs2/xattr.c struct gfs2_ea_header *ea, ea 243 fs/gfs2/xattr.c if (GFS2_EA_IS_STUFFED(ea)) ea 246 fs/gfs2/xattr.c dataptrs = GFS2_EA2DATAPTRS(ea); ea 247 fs/gfs2/xattr.c for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { ea 273 fs/gfs2/xattr.c dataptrs = GFS2_EA2DATAPTRS(ea); ea 274 fs/gfs2/xattr.c for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { ea 297 fs/gfs2/xattr.c len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); ea 300 fs/gfs2/xattr.c if (GFS2_EA_IS_LAST(ea)) ea 303 fs/gfs2/xattr.c ea->ea_type = GFS2_EATYPE_UNUSED; ea 304 fs/gfs2/xattr.c ea->ea_num_ptrs = 0; ea 318 fs/gfs2/xattr.c 
struct gfs2_ea_header *ea, ea 331 fs/gfs2/xattr.c error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); ea 344 fs/gfs2/xattr.c struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, ea 353 fs/gfs2/xattr.c if (ea->ea_type == GFS2_EATYPE_UNUSED) ea 356 fs/gfs2/xattr.c switch (ea->ea_type) { ea 373 fs/gfs2/xattr.c ea_size = l + ea->ea_name_len + 1; ea 379 fs/gfs2/xattr.c memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea), ea 380 fs/gfs2/xattr.c ea->ea_name_len); ea 439 fs/gfs2/xattr.c static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, ea 444 fs/gfs2/xattr.c unsigned int amount = GFS2_EA_DATA_LEN(ea); ea 446 fs/gfs2/xattr.c __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); ea 623 fs/gfs2/xattr.c struct gfs2_ea_header *ea; ea 637 fs/gfs2/xattr.c ea = GFS2_EA_BH2FIRST(*bhp); ea 638 fs/gfs2/xattr.c ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize); ea 639 fs/gfs2/xattr.c ea->ea_type = GFS2_EATYPE_UNUSED; ea 640 fs/gfs2/xattr.c ea->ea_flags = GFS2_EAFLAG_LAST; ea 641 fs/gfs2/xattr.c ea->ea_num_ptrs = 0; ea 660 fs/gfs2/xattr.c static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea, ea 666 fs/gfs2/xattr.c ea->ea_data_len = cpu_to_be32(er->er_data_len); ea 667 fs/gfs2/xattr.c ea->ea_name_len = er->er_name_len; ea 668 fs/gfs2/xattr.c ea->ea_type = er->er_type; ea 669 fs/gfs2/xattr.c ea->__pad = 0; ea 671 fs/gfs2/xattr.c memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len); ea 674 fs/gfs2/xattr.c ea->ea_num_ptrs = 0; ea 675 fs/gfs2/xattr.c memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); ea 677 fs/gfs2/xattr.c __be64 *dataptr = GFS2_EA2DATAPTRS(ea); ea 683 fs/gfs2/xattr.c ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize); ea 684 fs/gfs2/xattr.c for (x = 0; x < ea->ea_num_ptrs; x++) { ea 809 fs/gfs2/xattr.c static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea) ea 811 fs/gfs2/xattr.c u32 ea_size = GFS2_EA_SIZE(ea); ea 812 fs/gfs2/xattr.c struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea + ea 814 fs/gfs2/xattr.c u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size; ea 815 fs/gfs2/xattr.c int last = ea->ea_flags & GFS2_EAFLAG_LAST; ea 817 fs/gfs2/xattr.c ea->ea_rec_len = cpu_to_be32(ea_size); ea 818 fs/gfs2/xattr.c ea->ea_flags ^= last; ea 829 fs/gfs2/xattr.c struct gfs2_ea_header *ea = el->el_ea; ea 835 fs/gfs2/xattr.c if (!prev || !GFS2_EA_IS_STUFFED(ea)) { ea 836 fs/gfs2/xattr.c ea->ea_type = GFS2_EATYPE_UNUSED; ea 838 fs/gfs2/xattr.c } else if (GFS2_EA2NEXT(prev) != ea) { ea 840 fs/gfs2/xattr.c gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea); ea 843 fs/gfs2/xattr.c len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); ea 846 fs/gfs2/xattr.c if (GFS2_EA_IS_LAST(ea)) ea 861 fs/gfs2/xattr.c struct gfs2_ea_header *ea, struct ea_set *es) ea 873 fs/gfs2/xattr.c ea = ea_split_ea(ea); ea 875 fs/gfs2/xattr.c ea_write(ip, ea, er); ea 891 fs/gfs2/xattr.c struct gfs2_ea_header *ea = es->es_ea; ea 897 fs/gfs2/xattr.c ea = ea_split_ea(ea); ea 899 fs/gfs2/xattr.c error = ea_write(ip, ea, er); ea 910 fs/gfs2/xattr.c struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, ea 921 fs/gfs2/xattr.c if (ea->ea_type == GFS2_EATYPE_UNUSED) { ea 922 fs/gfs2/xattr.c if (GFS2_EA_REC_LEN(ea) < size) ea 924 fs/gfs2/xattr.c if (!GFS2_EA_IS_STUFFED(ea)) { ea 925 fs/gfs2/xattr.c error = ea_remove_unstuffed(ip, bh, ea, prev, 1); ea 930 fs/gfs2/xattr.c } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size) ea 936 fs/gfs2/xattr.c error = ea_set_simple_noalloc(ip, bh, ea, es); ea 943 fs/gfs2/xattr.c es->es_ea = ea; 
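
The gfs2 entries above walk packed, variable-length xattr records: each header carries a record length used to reach the next record, a LAST flag terminates the chain, and a zero record length is treated as corruption. A self-contained toy version of that walk (the header layout here is invented for the demo, not gfs2's on-disk format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ea_header {              /* invented layout, illustration only */
            uint32_t rec_len;       /* bytes from this header to the next */
            uint8_t  flags;         /* bit 0 set on the last record */
            uint8_t  name_len;
            char     name[];        /* name bytes follow the header */
    };

    #define EA_NEXT(ea)    ((struct ea_header *)((char *)(ea) + (ea)->rec_len))
    #define EA_IS_LAST(ea) ((ea)->flags & 1)

    static int ea_foreach(struct ea_header *ea)
    {
            for (;; ea = EA_NEXT(ea)) {
                    if (!ea->rec_len)       /* zero length => corrupt chain */
                            return -1;
                    printf("%.*s\n", ea->name_len, ea->name);
                    if (EA_IS_LAST(ea))
                            return 0;
            }
    }

    int main(void)
    {
            static union { unsigned char b[32]; uint32_t align; } blk;
            struct ea_header *first = (struct ea_header *)blk.b, *next;

            first->rec_len = 16; first->name_len = 3;
            memcpy(first->name, "uid", 3);
            next = EA_NEXT(first);
            next->rec_len = 16; next->flags = 1; next->name_len = 3;
            memcpy(next->name, "gid", 3);
            return ea_foreach(first);
    }

The real walker additionally bounds-checks each record against the buffer (the bh->b_data comparisons in the ea_foreach_i entries) before trusting rec_len, since the lengths come from disk.
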
ea 1076 fs/gfs2/xattr.c struct gfs2_ea_header *ea = el->el_ea; ea 1089 fs/gfs2/xattr.c len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); ea 1092 fs/gfs2/xattr.c if (GFS2_EA_IS_LAST(ea)) ea 1095 fs/gfs2/xattr.c ea->ea_type = GFS2_EATYPE_UNUSED; ea 13 fs/gfs2/xattr.h #define GFS2_EA_REC_LEN(ea) be32_to_cpu((ea)->ea_rec_len) ea 14 fs/gfs2/xattr.h #define GFS2_EA_DATA_LEN(ea) be32_to_cpu((ea)->ea_data_len) ea 16 fs/gfs2/xattr.h #define GFS2_EA_SIZE(ea) \ ea 17 fs/gfs2/xattr.h ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \ ea 18 fs/gfs2/xattr.h ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \ ea 19 fs/gfs2/xattr.h (sizeof(__be64) * (ea)->ea_num_ptrs)), 8) ea 21 fs/gfs2/xattr.h #define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs) ea 22 fs/gfs2/xattr.h #define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST) ea 27 fs/gfs2/xattr.h #define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1)) ea 28 fs/gfs2/xattr.h #define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len) ea 30 fs/gfs2/xattr.h #define GFS2_EA2DATAPTRS(ea) \ ea 31 fs/gfs2/xattr.h ((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8))) ea 33 fs/gfs2/xattr.h #define GFS2_EA2NEXT(ea) \ ea 34 fs/gfs2/xattr.h ((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea))) ea 485 fs/hpfs/anode.c struct extended_attribute *ea; ea 491 fs/hpfs/anode.c for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) ea 492 fs/hpfs/anode.c if (ea_indirect(ea)) ea 493 fs/hpfs/anode.c hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); ea 20 fs/hpfs/ea.c struct extended_attribute *ea = (struct extended_attribute *)ex; ea 27 fs/hpfs/ea.c if (ea_indirect(ea)) { ea 28 fs/hpfs/ea.c if (ea_valuelen(ea) != 8) { ea 33 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4)) ea 35 fs/hpfs/ea.c hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); ea 37 fs/hpfs/ea.c pos += ea->namelen + ea_valuelen(ea) + 5; ea 81 fs/hpfs/ea.c struct extended_attribute *ea; ea 83 fs/hpfs/ea.c for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) ea 84 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 85 fs/hpfs/ea.c if (ea_indirect(ea)) ea 87 fs/hpfs/ea.c if (ea_valuelen(ea) >= size) ea 89 fs/hpfs/ea.c memcpy(buf, ea_data(ea), ea_valuelen(ea)); ea 90 fs/hpfs/ea.c buf[ea_valuelen(ea)] = 0; ea 98 fs/hpfs/ea.c ea = (struct extended_attribute *)ex; ea 105 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) ea 107 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 108 fs/hpfs/ea.c if (ea_indirect(ea)) ea 110 fs/hpfs/ea.c if (ea_valuelen(ea) >= size) ea 112 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf)) ea 114 fs/hpfs/ea.c buf[ea_valuelen(ea)] = 0; ea 117 fs/hpfs/ea.c pos += ea->namelen + ea_valuelen(ea) + 5; ea 121 fs/hpfs/ea.c if (ea_len(ea) >= size) ea 123 fs/hpfs/ea.c if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf)) ea 125 fs/hpfs/ea.c buf[ea_len(ea)] = 0; ea 136 fs/hpfs/ea.c struct extended_attribute *ea; ea 138 fs/hpfs/ea.c for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) ea 139 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 140 fs/hpfs/ea.c if (ea_indirect(ea)) ea 141 fs/hpfs/ea.c return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); ea 142 fs/hpfs/ea.c if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { ea 146 fs/hpfs/ea.c memcpy(ret, ea_data(ea), ea_valuelen(ea)); ea 147 fs/hpfs/ea.c ret[ea_valuelen(ea)] = 0; ea 156 fs/hpfs/ea.c ea = (struct extended_attribute *)ex; ea 163 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4)) ea 165 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 166 fs/hpfs/ea.c if (ea_indirect(ea)) ea 167 fs/hpfs/ea.c return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); ea 168 fs/hpfs/ea.c if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { ea 172 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) { ea 176 fs/hpfs/ea.c ret[ea_valuelen(ea)] = 0; ea 179 fs/hpfs/ea.c pos += ea->namelen + ea_valuelen(ea) + 5; ea 199 fs/hpfs/ea.c struct extended_attribute *ea; ea 201 fs/hpfs/ea.c for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) ea 202 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 203 fs/hpfs/ea.c if (ea_indirect(ea)) { ea 204 fs/hpfs/ea.c if (ea_len(ea) == size) ea 205 fs/hpfs/ea.c set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); ea 206 fs/hpfs/ea.c } else if (ea_valuelen(ea) == size) { ea 207 fs/hpfs/ea.c memcpy(ea_data(ea), data, size); ea 217 fs/hpfs/ea.c ea = (struct extended_attribute *)ex; ea 224 fs/hpfs/ea.c if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) ea 226 fs/hpfs/ea.c if (!strcmp(ea->name, key)) { ea 227 fs/hpfs/ea.c if (ea_indirect(ea)) { ea 228 fs/hpfs/ea.c if (ea_len(ea) == size) ea 229 fs/hpfs/ea.c set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); ea 232 fs/hpfs/ea.c if (ea_valuelen(ea) == size) ea 233 fs/hpfs/ea.c hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); ea 237 fs/hpfs/ea.c pos += ea->namelen + ea_valuelen(ea) + 5; ea 255 fs/hpfs/ea.c ea = fnode_end_ea(fnode); ea 256 fs/hpfs/ea.c *(char *)ea = 0; ea 257 fs/hpfs/ea.c ea->namelen = strlen(key); ea 258 fs/hpfs/ea.c ea->valuelen_lo = size; ea 259 fs/hpfs/ea.c ea->valuelen_hi = size >> 8; ea 260 fs/hpfs/ea.c strcpy(ea->name, key); ea 261 fs/hpfs/ea.c memcpy(ea_data(ea), data, size); ea 470 fs/hpfs/hpfs.h u8 ea[316]; /* zero or more EA's, packed together ea 546 fs/hpfs/hpfs.h static inline bool ea_indirect(struct extended_attribute *ea) ea 548 fs/hpfs/hpfs.h return ea->flags & EA_indirect; ea 551 fs/hpfs/hpfs.h static inline bool ea_in_anode(struct extended_attribute *ea) ea 553 fs/hpfs/hpfs.h return ea->flags & EA_anode; ea 145 fs/hpfs/hpfs_fn.h static unsigned ea_valuelen(struct extended_attribute *ea) ea 147 fs/hpfs/hpfs_fn.h return ea->valuelen_lo + 256 * ea->valuelen_hi; ea 150 fs/hpfs/hpfs_fn.h static inline struct extended_attribute *next_ea(struct extended_attribute *ea) ea 152 fs/hpfs/hpfs_fn.h return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea)); ea 155 fs/hpfs/hpfs_fn.h static inline secno ea_sec(struct extended_attribute *ea) ea 157 fs/hpfs/hpfs_fn.h return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen))); ea 160 fs/hpfs/hpfs_fn.h static inline secno ea_len(struct extended_attribute *ea) ea 162 fs/hpfs/hpfs_fn.h return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen))); ea 165 fs/hpfs/hpfs_fn.h static inline char *ea_data(struct extended_attribute *ea) ea 167 fs/hpfs/hpfs_fn.h return (char *)((char *)ea + 5 + ea->namelen); ea 50 fs/hpfs/inode.c void *ea; ea 63 fs/hpfs/inode.c if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) { ea 65 fs/hpfs/inode.c i_uid_write(i, le16_to_cpu(*(__le16*)ea)); ea 68 fs/hpfs/inode.c kfree(ea); ea 70 fs/hpfs/inode.c if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) { ea 72 fs/hpfs/inode.c i_gid_write(i, le16_to_cpu(*(__le16*)ea)); ea 75 fs/hpfs/inode.c kfree(ea); ea 77 fs/hpfs/inode.c if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) { ea 78 fs/hpfs/inode.c kfree(ea); ea 89 fs/hpfs/inode.c if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) { ea 93 fs/hpfs/inode.c mode = le16_to_cpu(*(__le16*)ea); ea 96 fs/hpfs/inode.c kfree(ea); ea 99 fs/hpfs/inode.c if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) { ea 101 fs/hpfs/inode.c rdev = le32_to_cpu(*(__le32*)ea); ea 102 fs/hpfs/inode.c kfree(ea); ea 154 fs/hpfs/inode.c __le32 ea; ea 156 fs/hpfs/inode.c ea = cpu_to_le32(i_uid_read(i)); ea 157 fs/hpfs/inode.c hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2); ea 161 fs/hpfs/inode.c ea = cpu_to_le32(i_gid_read(i)); ea 162 fs/hpfs/inode.c hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2); ea 170 fs/hpfs/inode.c ea = cpu_to_le32(i->i_mode); ea 172 fs/hpfs/inode.c hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2); ea 176 fs/hpfs/inode.c ea = cpu_to_le32(new_encode_dev(i->i_rdev)); ea 177 fs/hpfs/inode.c hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4); ea 172 fs/hpfs/map.c struct extended_attribute *ea; ea 203 fs/hpfs/map.c ea = fnode_ea(fnode); ea 205 fs/hpfs/map.c while (ea != ea_end) { ea 206 fs/hpfs/map.c if (ea 
> ea_end) { ea 211 fs/hpfs/map.c ea = next_ea(ea); ea 3076 fs/jfs/jfs_imap.c jfs_ip->ea = dip->di_ea; ea 3150 fs/jfs/jfs_imap.c dip->di_ea = jfs_ip->ea; ea 34 fs/jfs/jfs_incore.h dxd_t ea; /* dxd describing ea */ ea 108 fs/jfs/jfs_inode.c memset(&jfs_inode->ea, 0, sizeof(dxd_t)); ea 25 fs/jfs/jfs_xattr.h struct jfs_ea ea[0]; /* Variable length list */ ea 35 fs/jfs/jfs_xattr.h #define EA_SIZE(ea) \ ea 36 fs/jfs/jfs_xattr.h (sizeof (struct jfs_ea) + (ea)->namelen + 1 + \ ea 37 fs/jfs/jfs_xattr.h le16_to_cpu((ea)->valuelen)) ea 38 fs/jfs/jfs_xattr.h #define NEXT_EA(ea) ((struct jfs_ea *) (((char *) (ea)) + (EA_SIZE (ea)))) ea 39 fs/jfs/jfs_xattr.h #define FIRST_EA(ealist) ((ealist)->ea) ea 38 fs/jfs/namei.c dxd_t *ea = &JFS_IP(inode)->ea; ea 40 fs/jfs/namei.c if (ea->flag & DXD_EXTENT) { ea 42 fs/jfs/namei.c invalidate_dxd_metapages(inode, *ea); ea 43 fs/jfs/namei.c dbFree(inode, addressDXD(ea), lengthDXD(ea)); ea 45 fs/jfs/namei.c ea->flag = 0; ea 399 fs/jfs/namei.c if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { ea 401 fs/jfs/namei.c txEA(tid, ip, &JFS_IP(ip)->ea, NULL); ea 403 fs/jfs/namei.c JFS_IP(ip)->ea.flag = 0; ea 653 fs/jfs/namei.c if (JFS_IP(ip)->ea.flag & DXD_EXTENT) ea 655 fs/jfs/namei.c txEA(tid, ip, &JFS_IP(ip)->ea, NULL); ea 713 fs/jfs/namei.c if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { ea 714 fs/jfs/namei.c s64 xaddr = addressDXD(&JFS_IP(ip)->ea); ea 715 fs/jfs/namei.c int xlen = lengthDXD(&JFS_IP(ip)->ea); ea 720 fs/jfs/namei.c invalidate_dxd_metapages(ip, JFS_IP(ip)->ea); ea 95 fs/jfs/xattr.c static inline int name_size(struct jfs_ea *ea) ea 97 fs/jfs/xattr.c if (is_known_namespace(ea->name)) ea 98 fs/jfs/xattr.c return ea->namelen; ea 100 fs/jfs/xattr.c return ea->namelen + XATTR_OS2_PREFIX_LEN; ea 103 fs/jfs/xattr.c static inline int copy_name(char *buffer, struct jfs_ea *ea) ea 105 fs/jfs/xattr.c int len = ea->namelen; ea 107 fs/jfs/xattr.c if (!is_known_namespace(ea->name)) { ea 112 fs/jfs/xattr.c memcpy(buffer, ea->name, ea->namelen); ea 113 fs/jfs/xattr.c buffer[ea->namelen] = 0; ea 144 fs/jfs/xattr.c int size, dxd_t * ea) ea 159 fs/jfs/xattr.c if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE)) ea 162 fs/jfs/xattr.c DXDsize(ea, size); ea 163 fs/jfs/xattr.c DXDlength(ea, 0); ea 164 fs/jfs/xattr.c DXDaddress(ea, 0); ea 166 fs/jfs/xattr.c ea->flag = DXD_INLINE; ea 169 fs/jfs/xattr.c ea->flag = 0; ea 170 fs/jfs/xattr.c DXDsize(ea, 0); ea 171 fs/jfs/xattr.c DXDlength(ea, 0); ea 172 fs/jfs/xattr.c DXDaddress(ea, 0); ea 175 fs/jfs/xattr.c if (ji->ea.flag & DXD_INLINE) ea 202 fs/jfs/xattr.c dxd_t * ea) ea 220 fs/jfs/xattr.c if (!ea_write_inline(ip, ealist, size, ea)) ea 287 fs/jfs/xattr.c ea->flag = DXD_EXTENT; ea 288 fs/jfs/xattr.c DXDsize(ea, le32_to_cpu(ealist->size)); ea 289 fs/jfs/xattr.c DXDlength(ea, nblocks); ea 290 fs/jfs/xattr.c DXDaddress(ea, blkno); ea 293 fs/jfs/xattr.c if (ji->ea.flag & DXD_INLINE) ea 320 fs/jfs/xattr.c int ea_size = sizeDXD(&ji->ea); ea 328 fs/jfs/xattr.c if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea))) ea 365 fs/jfs/xattr.c if (ji->ea.flag & DXD_INLINE) ea 368 fs/jfs/xattr.c nbytes = sizeDXD(&ji->ea); ea 378 fs/jfs/xattr.c nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage; ea 379 fs/jfs/xattr.c blkno = addressDXD(&ji->ea) << sbi->l2nbperpage; ea 431 fs/jfs/xattr.c int ea_size = sizeDXD(&ji->ea); ea 438 fs/jfs/xattr.c if (ji->ea.flag == 0) ea 460 fs/jfs/xattr.c } else if (ji->ea.flag & DXD_INLINE) { ea 469 fs/jfs/xattr.c if (!(ji->ea.flag & DXD_EXTENT)) { ea 547 fs/jfs/xattr.c ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea), ea 548 
fs/jfs/xattr.c lengthDXD(&ji->ea) << sb->s_blocksize_bits, ea 625 fs/jfs/xattr.c if (ji->ea.flag & DXD_EXTENT) { ea 626 fs/jfs/xattr.c invalidate_dxd_metapages(inode, ji->ea); ea 627 fs/jfs/xattr.c old_blocks = lengthDXD(&ji->ea); ea 631 fs/jfs/xattr.c txEA(tid, inode, &ji->ea, &ea_buf->new_ea); ea 634 fs/jfs/xattr.c if (ji->ea.flag & DXD_INLINE) ea 637 fs/jfs/xattr.c ji->ea = ea_buf->new_ea; ea 639 fs/jfs/xattr.c txEA(tid, inode, &ji->ea, NULL); ea 640 fs/jfs/xattr.c if (ji->ea.flag & DXD_INLINE) ea 642 fs/jfs/xattr.c ji->ea.flag = 0; ea 643 fs/jfs/xattr.c ji->ea.size = 0; ea 659 fs/jfs/xattr.c struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL; ea 682 fs/jfs/xattr.c for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea 683 fs/jfs/xattr.c ea = NEXT_EA(ea)) { ea 684 fs/jfs/xattr.c if ((namelen == ea->namelen) && ea 685 fs/jfs/xattr.c (memcmp(name, ea->name, namelen) == 0)) { ea 691 fs/jfs/xattr.c old_ea = ea; ea 692 fs/jfs/xattr.c old_ea_size = EA_SIZE(ea); ea 693 fs/jfs/xattr.c next_ea = NEXT_EA(ea); ea 695 fs/jfs/xattr.c new_size += EA_SIZE(ea); ea 754 fs/jfs/xattr.c ea = (struct jfs_ea *) ((char *) ealist + xattr_size); ea 755 fs/jfs/xattr.c ea->flag = 0; ea 756 fs/jfs/xattr.c ea->namelen = namelen; ea 757 fs/jfs/xattr.c ea->valuelen = (cpu_to_le16(value_len)); ea 758 fs/jfs/xattr.c memcpy(ea->name, name, namelen); ea 759 fs/jfs/xattr.c ea->name[namelen] = 0; ea 761 fs/jfs/xattr.c memcpy(&ea->name[namelen + 1], value, value_len); ea 762 fs/jfs/xattr.c xattr_size += EA_SIZE(ea); ea 798 fs/jfs/xattr.c struct jfs_ea *ea; ea 820 fs/jfs/xattr.c for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) ea 821 fs/jfs/xattr.c if ((namelen == ea->namelen) && ea 822 fs/jfs/xattr.c memcmp(name, ea->name, namelen) == 0) { ea 824 fs/jfs/xattr.c size = le16_to_cpu(ea->valuelen); ea 831 fs/jfs/xattr.c value = ((char *) &ea->name) + ea->namelen + 1; ea 848 fs/jfs/xattr.c static inline int can_list(struct jfs_ea *ea) ea 850 fs/jfs/xattr.c return (strncmp(ea->name, XATTR_TRUSTED_PREFIX, ea 862 fs/jfs/xattr.c struct jfs_ea *ea; ea 879 fs/jfs/xattr.c for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) { ea 880 fs/jfs/xattr.c if (can_list(ea)) ea 881 fs/jfs/xattr.c size += name_size(ea) + 1; ea 894 fs/jfs/xattr.c for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) { ea 895 fs/jfs/xattr.c if (can_list(ea)) { ea 896 fs/jfs/xattr.c int namelen = copy_name(buffer, ea); ea 1075 fs/ntfs/layout.h } __attribute__ ((__packed__)) ea; ea 50 fs/udf/misc.c uint8_t *ea = NULL, *ad = NULL; ea 55 fs/udf/misc.c ea = iinfo->i_ext.i_data; ea 59 fs/udf/misc.c ad = ea; ea 70 fs/udf/misc.c eahd = (struct extendedAttrHeaderDesc *)ea; ea 107 fs/udf/misc.c memmove(&ea[offset - aal + size], ea 108 fs/udf/misc.c &ea[aal], offset - aal); ea 117 fs/udf/misc.c memmove(&ea[offset - ial + size], ea 118 fs/udf/misc.c &ea[ial], offset - ial); ea 128 fs/udf/misc.c memmove(&ea[offset - aal + size], ea 129 fs/udf/misc.c &ea[aal], offset - aal); ea 142 fs/udf/misc.c return (struct genericFormat *)&ea[offset]; ea 152 fs/udf/misc.c uint8_t *ea = NULL; ea 156 fs/udf/misc.c ea = iinfo->i_ext.i_data; ea 160 fs/udf/misc.c eahd = (struct extendedAttrHeaderDesc *)ea; ea 177 fs/udf/misc.c gaf = (struct genericFormat *)&ea[offset]; ea 889 fs/udf/namei.c uint8_t *ea; ea 941 fs/udf/namei.c ea = epos.bh->b_data + udf_ext0_offset(inode); ea 943 fs/udf/namei.c ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; ea 946 fs/udf/namei.c pc = (struct pathComponent *)ea; ea 965 fs/udf/namei.c pc = (struct pathComponent 
*)(ea + elen); ea 168 include/linux/coresight.h struct dev_ext_attribute *ea; ea 716 net/appletalk/aarp.c struct elapaarp *ea = aarp_hdr(skb); ea 731 net/appletalk/aarp.c if (!skb_pull(skb, sizeof(*ea))) ea 734 net/appletalk/aarp.c function = ntohs(ea->function); ea 738 net/appletalk/aarp.c ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN || ea 739 net/appletalk/aarp.c ea->pa_src_zero || ea->pa_dst_zero) ea 743 net/appletalk/aarp.c hash = ea->pa_src_node % (AARP_HASH_SIZE - 1); ea 746 net/appletalk/aarp.c sa.s_node = ea->pa_src_node; ea 747 net/appletalk/aarp.c sa.s_net = ea->pa_src_net; ea 755 net/appletalk/aarp.c ifa->address.s_node == ea->pa_dst_node && ea 756 net/appletalk/aarp.c ifa->address.s_net == ea->pa_dst_net) { ea 762 net/appletalk/aarp.c da.s_node = ea->pa_dst_node; ea 763 net/appletalk/aarp.c da.s_net = ea->pa_dst_net; ea 788 net/appletalk/aarp.c ether_addr_copy(a->hwaddr, ea->hw_src); ea 810 net/appletalk/aarp.c sa.s_node = ea->pa_dst_node; ea 811 net/appletalk/aarp.c sa.s_net = ea->pa_dst_net; ea 852 net/appletalk/aarp.c sa.s_node = ea->pa_src_node; ea 853 net/appletalk/aarp.c sa.s_net = ea->pa_src_net; ea 857 net/appletalk/aarp.c aarp_send_reply(dev, ma, &sa, ea->hw_src); ea 5702 tools/lib/traceevent/event-parse.c struct tep_event * const * ea = a; ea 5705 tools/lib/traceevent/event-parse.c if ((*ea)->id < (*eb)->id) ea 5708 tools/lib/traceevent/event-parse.c if ((*ea)->id > (*eb)->id) ea 5716 tools/lib/traceevent/event-parse.c struct tep_event * const * ea = a; ea 5720 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->name, (*eb)->name); ea 5724 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->system, (*eb)->system); ea 5733 tools/lib/traceevent/event-parse.c struct tep_event * const * ea = a; ea 5737 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->system, (*eb)->system); ea 5741 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->name, (*eb)->name); ea 98 tools/lib/traceevent/parse-filter.c const struct tep_filter_type *ea = a; ea 101 tools/lib/traceevent/parse-filter.c if (ea->event_id < eb->event_id) ea 104 tools/lib/traceevent/parse-filter.c if (ea->event_id > eb->event_id)
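
The listing closes inside tools/lib/traceevent, where ea/eb are qsort() comparator locals: qsort hands the comparator pointers to array slots, so an array of struct pointers is dereferenced through one extra level of indirection (the "* const *" in the entries). A self-contained version of that shape (struct event is a stand-in for tep_event):

    #include <stdio.h>
    #include <stdlib.h>

    struct event { int id; const char *name; };

    static int events_id_cmp(const void *a, const void *b)
    {
            struct event * const *ea = a;   /* slot in the array, not the struct */
            struct event * const *eb = b;

            if ((*ea)->id < (*eb)->id)
                    return -1;
            if ((*ea)->id > (*eb)->id)
                    return 1;
            return 0;
    }

    int main(void)
    {
            struct event e1 = { 3, "exit" }, e2 = { 1, "open" };
            struct event *list[] = { &e1, &e2 };

            qsort(list, 2, sizeof(list[0]), events_id_cmp);
            printf("%s %s\n", list[0]->name, list[1]->name); /* open exit */
            return 0;
    }

The name/system comparators in the adjacent entries follow the same pattern, just with strcmp() on the dereferenced fields and a secondary key as tie-breaker.
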