esr 168 arch/alpha/include/asm/core_lca.h unsigned long esr; /* error-status register */
esr 187 arch/alpha/include/asm/core_lca.h unsigned long esr; /* error status register */
esr 338 arch/alpha/kernel/core_lca.c mem_error(unsigned long esr, unsigned long ear)
esr 341 arch/alpha/kernel/core_lca.c ((esr & ESR_CEE) ? "Correctable" :
esr 342 arch/alpha/kernel/core_lca.c (esr & ESR_UEE) ? "Uncorrectable" : "A"),
esr 343 arch/alpha/kernel/core_lca.c (esr & ESR_WRE) ? "write" : "read",
esr 344 arch/alpha/kernel/core_lca.c (esr & ESR_SOR) ? "memory" : "b-cache",
esr 346 arch/alpha/kernel/core_lca.c if (esr & ESR_CTE) {
esr 349 arch/alpha/kernel/core_lca.c if (esr & ESR_MSE) {
esr 352 arch/alpha/kernel/core_lca.c if (esr & ESR_MHE) {
esr 355 arch/alpha/kernel/core_lca.c if (esr & ESR_NXM) {
esr 435 arch/alpha/kernel/core_lca.c if (el.s->esr & ESR_EAV) {
esr 436 arch/alpha/kernel/core_lca.c mem_error(el.s->esr, el.s->ear);
esr 450 arch/alpha/kernel/core_lca.c if (el.l->esr & ESR_EAV) {
esr 451 arch/alpha/kernel/core_lca.c mem_error(el.l->esr, el.l->ear);
esr 9 arch/arm/include/asm/kvm_ras.h static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
esr 78 arch/arm64/include/asm/debug-monitors.h int (*fn)(struct pt_regs *regs, unsigned int esr);
esr 89 arch/arm64/include/asm/debug-monitors.h int (*fn)(struct pt_regs *regs, unsigned int esr);
esr 70 arch/arm64/include/asm/esr.h #define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
esr 184 arch/arm64/include/asm/esr.h #define ESR_ELx_SYS64_ISS_RT(esr) \
esr 185 arch/arm64/include/asm/esr.h (((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
esr 322 arch/arm64/include/asm/esr.h static inline bool esr_is_data_abort(u32 esr)
esr 324 arch/arm64/include/asm/esr.h const u32 ec = ESR_ELx_EC(esr);
esr 329 arch/arm64/include/asm/esr.h const char *esr_get_class_string(u32 esr);
esr 23 arch/arm64/include/asm/exception.h unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
esr 26 arch/arm64/include/asm/exception.h esr |= (disr & DISR_EL1_ESR_MASK);
esr 28 arch/arm64/include/asm/exception.h esr |= (disr & ESR_ELx_ISS_MASK);
esr 30 arch/arm64/include/asm/exception.h return esr;
esr 254 arch/arm64/include/asm/kvm_emulate.h u32 esr = kvm_vcpu_get_hsr(vcpu);
esr 256 arch/arm64/include/asm/kvm_emulate.h if (esr & ESR_ELx_CV)
esr 257 arch/arm64/include/asm/kvm_emulate.h return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
esr 370 arch/arm64/include/asm/kvm_emulate.h u32 esr = kvm_vcpu_get_hsr(vcpu);
esr 371 arch/arm64/include/asm/kvm_emulate.h return ESR_ELx_SYS64_ISS_RT(esr);
esr 17 arch/arm64/include/asm/kvm_ras.h static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
esr 70 arch/arm64/include/asm/traps.h static inline bool arm64_is_ras_serror(u32 esr)
esr 74 arch/arm64/include/asm/traps.h if (esr & ESR_ELx_IDS)
esr 90 arch/arm64/include/asm/traps.h static inline u32 arm64_ras_serror_get_severity(u32 esr)
esr 92 arch/arm64/include/asm/traps.h u32 aet = esr & ESR_ELx_AET;
esr 94 arch/arm64/include/asm/traps.h if (!arm64_is_ras_serror(esr)) {
esr 103 arch/arm64/include/asm/traps.h if ((esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR) {
esr 111 arch/arm64/include/asm/traps.h bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr);
esr 112 arch/arm64/include/asm/traps.h void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr);
esr 94 arch/arm64/include/uapi/asm/sigcontext.h __u64 esr;
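The arch/arm64/include/asm/esr.h entries above define the accessors the rest of the arm64 hits rely on. A minimal standalone sketch of that decode, assuming the ARMv8-A layout (EC in bits [31:26], data-abort exception classes 0x24/0x25); the real constants live in esr.h:

/* Sketch of decoding the arm64 ESR exception-class field, after ESR_ELx_EC().
 * Constants are assumed from the ARMv8-A definition of ESR_ELx. */
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT    26
#define ESR_ELx_EC_MASK     (0x3Fu << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)     (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_EC_DABT_LOW 0x24    /* data abort from a lower EL */
#define ESR_ELx_EC_DABT_CUR 0x25    /* data abort from the current EL */

static int esr_is_data_abort(uint32_t esr)
{
        const uint32_t ec = ESR_ELx_EC(esr);

        return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
}

int main(void)
{
        uint32_t esr = 0x96000045;      /* example syndrome: EC = 0x25 */

        printf("EC = 0x%02x, data abort: %d\n",
               ESR_ELx_EC(esr), esr_is_data_abort(esr));
        return 0;
}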
esr 202 arch/arm64/kernel/debug-monitors.c static int call_step_hook(struct pt_regs *regs, unsigned int esr)
esr 215 arch/arm64/kernel/debug-monitors.c retval = hook->fn(regs, esr);
esr 239 arch/arm64/kernel/debug-monitors.c static int single_step_handler(unsigned long unused, unsigned int esr,
esr 251 arch/arm64/kernel/debug-monitors.c if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
esr 300 arch/arm64/kernel/debug-monitors.c static int call_break_hook(struct pt_regs *regs, unsigned int esr)
esr 304 arch/arm64/kernel/debug-monitors.c int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
esr 313 arch/arm64/kernel/debug-monitors.c unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
esr 319 arch/arm64/kernel/debug-monitors.c return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
esr 323 arch/arm64/kernel/debug-monitors.c static int brk_handler(unsigned long unused, unsigned int esr,
esr 326 arch/arm64/kernel/debug-monitors.c if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
esr 925 arch/arm64/kernel/fpsimd.c asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
esr 952 arch/arm64/kernel/fpsimd.c asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
esr 961 arch/arm64/kernel/fpsimd.c asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
esr 965 arch/arm64/kernel/fpsimd.c if (esr & ESR_ELx_FP_EXC_TFV) {
esr 966 arch/arm64/kernel/fpsimd.c if (esr & FPEXC_IOF)
esr 968 arch/arm64/kernel/fpsimd.c else if (esr & FPEXC_DZF)
esr 970 arch/arm64/kernel/fpsimd.c else if (esr & FPEXC_OFF)
esr 972 arch/arm64/kernel/fpsimd.c else if (esr & FPEXC_UFF)
esr 974 arch/arm64/kernel/fpsimd.c else if (esr & FPEXC_IXF)
esr 620 arch/arm64/kernel/hw_breakpoint.c static int breakpoint_handler(unsigned long unused, unsigned int esr,
esr 733 arch/arm64/kernel/hw_breakpoint.c static int watchpoint_handler(unsigned long addr, unsigned int esr,
esr 762 arch/arm64/kernel/hw_breakpoint.c access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
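call_break_hook() above picks a handler by the BRK immediate carried in the low ISS bits of the syndrome. A hedged standalone sketch of that dispatch pattern; the 16-bit comment mask, the simplified break_hook struct, the demo_hook handler, and the 0x800 immediate are illustrative assumptions, not the kernel's actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed: the BRK64 "comment" immediate occupies the low 16 ISS bits. */
#define BRK64_ISS_COMMENT_MASK 0xffffu

struct break_hook {                     /* hypothetical, simplified */
        unsigned int imm;               /* BRK immediate to match */
        int (*fn)(uint32_t esr);        /* handler */
};

static int demo_hook(uint32_t esr)
{
        printf("handled BRK, comment=0x%x\n", esr & BRK64_ISS_COMMENT_MASK);
        return 0;                       /* stands in for DBG_HOOK_HANDLED */
}

static const struct break_hook hooks[] = {
        { .imm = 0x800, .fn = demo_hook },      /* hypothetical immediate */
};

static int call_break_hook(uint32_t esr)
{
        unsigned int comment = esr & BRK64_ISS_COMMENT_MASK;
        unsigned int i;

        for (i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++)
                if (hooks[i].imm == comment)
                        return hooks[i].fn(esr);
        return -1;                      /* stands in for DBG_HOOK_ERROR */
}

int main(void)
{
        /* EC=BRK64 (0x3C) in the top bits, comment immediate 0x800 */
        return call_break_hook(0xf2000800u) ? 1 : 0;
}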
esr 234 arch/arm64/kernel/kgdb.c static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
esr 241 arch/arm64/kernel/kgdb.c static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
esr 250 arch/arm64/kernel/kgdb.c static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
esr 408 arch/arm64/kernel/probes/kprobes.c kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
esr 431 arch/arm64/kernel/probes/kprobes.c kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
esr 169 arch/arm64/kernel/probes/uprobes.c unsigned int esr)
esr 178 arch/arm64/kernel/probes/uprobes.c unsigned int esr)
esr 639 arch/arm64/kernel/signal.c __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
esr 213 arch/arm64/kernel/traps.c unsigned int esr = tsk->thread.fault_code;
esr 223 arch/arm64/kernel/traps.c if (esr)
esr 224 arch/arm64/kernel/traps.c pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
esr 429 arch/arm64/kernel/traps.c static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
esr 432 arch/arm64/kernel/traps.c int rt = ESR_ELx_SYS64_ISS_RT(esr);
esr 433 arch/arm64/kernel/traps.c int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
esr 468 arch/arm64/kernel/traps.c static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
esr 470 arch/arm64/kernel/traps.c int rt = ESR_ELx_SYS64_ISS_RT(esr);
esr 487 arch/arm64/kernel/traps.c static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
esr 489 arch/arm64/kernel/traps.c int rt = ESR_ELx_SYS64_ISS_RT(esr);
esr 495 arch/arm64/kernel/traps.c static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
esr 497 arch/arm64/kernel/traps.c int rt = ESR_ELx_SYS64_ISS_RT(esr);
esr 503 arch/arm64/kernel/traps.c static void mrs_handler(unsigned int esr, struct pt_regs *regs)
esr 507 arch/arm64/kernel/traps.c rt = ESR_ELx_SYS64_ISS_RT(esr);
esr 508 arch/arm64/kernel/traps.c sysreg = esr_sys64_to_sysreg(esr);
esr 514 arch/arm64/kernel/traps.c static void wfi_handler(unsigned int esr, struct pt_regs *regs)
esr 522 arch/arm64/kernel/traps.c void (*handler)(unsigned int esr, struct pt_regs *regs);
esr 592 arch/arm64/kernel/traps.c static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
esr 597 arch/arm64/kernel/traps.c if (!(esr & ESR_ELx_CV)) {
esr 606 arch/arm64/kernel/traps.c cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
esr 642 arch/arm64/kernel/traps.c static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
esr 644 arch/arm64/kernel/traps.c int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
esr 659 arch/arm64/kernel/traps.c static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
esr 661 arch/arm64/kernel/traps.c int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
esr 662 arch/arm64/kernel/traps.c int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
esr 679 arch/arm64/kernel/traps.c asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
esr 683 arch/arm64/kernel/traps.c if (!cp15_cond_valid(esr, regs)) {
esr 692 arch/arm64/kernel/traps.c switch (ESR_ELx_EC(esr)) {
esr 705 arch/arm64/kernel/traps.c if ((hook->esr_mask & esr) == hook->esr_val) {
esr 706 arch/arm64/kernel/traps.c hook->handler(esr, regs);
esr 719 arch/arm64/kernel/traps.c asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
esr 724 arch/arm64/kernel/traps.c if ((hook->esr_mask & esr) == hook->esr_val) {
esr 725 arch/arm64/kernel/traps.c hook->handler(esr, regs);
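do_sysinstr() and do_cp15instr() above walk a hook table and fire the first entry whose masked syndrome matches. A standalone sketch of that (esr_mask, esr_val) match; the struct layout, mask, and value here are hypothetical placeholders:

#include <stdint.h>
#include <stdio.h>

struct sys64_hook {                     /* simplified after the listing */
        uint32_t esr_mask;              /* bits that must match */
        uint32_t esr_val;               /* required value under the mask */
        void (*handler)(uint32_t esr);
};

static void demo_handler(uint32_t esr)
{
        printf("trapped sys instruction, ESR=0x%08x\n", esr);
}

static const struct sys64_hook sys64_hooks[] = {
        /* hypothetical mask/value pair for illustration only */
        { .esr_mask = 0xfff00000u, .esr_val = 0x62300000u,
          .handler = demo_handler },
        {}                              /* sentinel */
};

static void do_sysinstr(uint32_t esr)
{
        const struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr);
                        return;
                }
        /* no hook matched: the kernel would inject an undef here */
}

int main(void)
{
        do_sysinstr(0x62345678u);
        return 0;
}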
esr 780 arch/arm64/kernel/traps.c const char *esr_get_class_string(u32 esr)
esr 782 arch/arm64/kernel/traps.c return esr_class_str[ESR_ELx_EC(esr)];
esr 789 arch/arm64/kernel/traps.c asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
esr 794 arch/arm64/kernel/traps.c handler[reason], smp_processor_id(), esr,
esr 795 arch/arm64/kernel/traps.c esr_get_class_string(esr));
esr 805 arch/arm64/kernel/traps.c asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
esr 810 arch/arm64/kernel/traps.c current->thread.fault_code = esr;
esr 826 arch/arm64/kernel/traps.c unsigned int esr = read_sysreg(esr_el1);
esr 832 arch/arm64/kernel/traps.c pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
esr 853 arch/arm64/kernel/traps.c void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
esr 858 arch/arm64/kernel/traps.c smp_processor_id(), esr, esr_get_class_string(esr));
esr 868 arch/arm64/kernel/traps.c bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
esr 870 arch/arm64/kernel/traps.c u32 aet = arm64_ras_serror_get_severity(esr);
esr 896 arch/arm64/kernel/traps.c arm64_serror_panic(regs, esr);
esr 900 arch/arm64/kernel/traps.c asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
esr 908 arch/arm64/kernel/traps.c if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
esr 909 arch/arm64/kernel/traps.c arm64_serror_panic(regs, esr);
esr 956 arch/arm64/kernel/traps.c static int bug_handler(struct pt_regs *regs, unsigned int esr)
esr 986 arch/arm64/kernel/traps.c #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
esr 988 arch/arm64/kernel/traps.c static int kasan_handler(struct pt_regs *regs, unsigned int esr)
esr 990 arch/arm64/kernel/traps.c bool recover = esr & KASAN_ESR_RECOVER;
esr 991 arch/arm64/kernel/traps.c bool write = esr & KASAN_ESR_WRITE;
esr 992 arch/arm64/kernel/traps.c size_t size = KASAN_ESR_SIZE(esr);
esr 1031 arch/arm64/kernel/traps.c int __init early_brk64(unsigned long addr, unsigned int esr,
esr 1035 arch/arm64/kernel/traps.c unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
esr 1038 arch/arm64/kernel/traps.c return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
esr 1040 arch/arm64/kernel/traps.c return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
esr 30 arch/arm64/kvm/handle_exit.c static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
esr 32 arch/arm64/kvm/handle_exit.c if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
esr 277 arch/arm64/kvm/hyp/switch.c u64 esr;
esr 280 arch/arm64/kvm/hyp/switch.c esr = vcpu->arch.fault.esr_el2;
esr 281 arch/arm64/kvm/hyp/switch.c ec = ESR_ELx_EC(esr);
esr 299 arch/arm64/kvm/hyp/switch.c if (!(esr & ESR_ELx_S1PTW) &&
esr 301 arch/arm64/kvm/hyp/switch.c (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
esr 117 arch/arm64/kvm/inject_fault.c u32 esr = 0;
esr 132 arch/arm64/kvm/inject_fault.c esr |= ESR_ELx_IL;
esr 139 arch/arm64/kvm/inject_fault.c esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
esr 141 arch/arm64/kvm/inject_fault.c esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
esr 144 arch/arm64/kvm/inject_fault.c esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
esr 146 arch/arm64/kvm/inject_fault.c vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
esr 152 arch/arm64/kvm/inject_fault.c u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
esr 165 arch/arm64/kvm/inject_fault.c esr |= ESR_ELx_IL;
esr 167 arch/arm64/kvm/inject_fault.c vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
esr 216 arch/arm64/kvm/inject_fault.c void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
esr 218 arch/arm64/kvm/inject_fault.c vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
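kvm/inject_fault.c above runs the decode in reverse: it composes an ESR_EL1 value for a fault injected into a guest by OR-ing together an exception class, the IL bit, and a fault status code. A minimal sketch, assuming the ARMv8-A bit positions (EC at [31:26], IL at bit 25, synchronous external abort FSC 0x10):

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT    26
#define ESR_ELx_IL          (1u << 25)  /* 32-bit instruction length */
#define ESR_ELx_EC_DABT_LOW 0x24u       /* data abort from a lower EL */
#define ESR_ELx_FSC_EXTABT  0x10u       /* synchronous external abort */

/* Compose the ESR_EL1 value for a data abort injected into a guest. */
static uint32_t make_dabt_esr(void)
{
        uint32_t esr = 0;

        esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
        esr |= ESR_ELx_IL;
        esr |= ESR_ELx_FSC_EXTABT;
        return esr;                     /* 0x92000010 */
}

int main(void)
{
        printf("injected ESR_EL1 = 0x%08x\n", make_dabt_esr());
        return 0;
}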
esr 2290 arch/arm64/kvm/sys_regs.c unsigned long esr = kvm_vcpu_get_hsr(vcpu);
esr 2294 arch/arm64/kvm/sys_regs.c trace_kvm_handle_sys_reg(esr);
esr 2298 arch/arm64/kvm/sys_regs.c params.Op0 = (esr >> 20) & 3;
esr 2299 arch/arm64/kvm/sys_regs.c params.Op1 = (esr >> 14) & 0x7;
esr 2300 arch/arm64/kvm/sys_regs.c params.CRn = (esr >> 10) & 0xf;
esr 2301 arch/arm64/kvm/sys_regs.c params.CRm = (esr >> 1) & 0xf;
esr 2302 arch/arm64/kvm/sys_regs.c params.Op2 = (esr >> 17) & 0x7;
esr 2304 arch/arm64/kvm/sys_regs.c params.is_write = !(esr & 1);
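The kvm/sys_regs.c hits spell out the ISS layout of a trapped MSR/MRS access: each encoding field sits at a fixed offset in the syndrome, and bit 0 gives the access direction. A standalone sketch reusing exactly the shifts and masks shown above; the struct and the sample value are illustrative:

#include <stdint.h>
#include <stdio.h>

struct sys_reg_params {         /* simplified after the listing */
        uint8_t Op0, Op1, CRn, CRm, Op2;
        int is_write;
};

/* Unpack a trapped system-register access, mirroring kvm_handle_sys_reg(). */
static struct sys_reg_params decode_sysreg_trap(uint32_t esr)
{
        struct sys_reg_params p;

        p.Op0 = (esr >> 20) & 3;
        p.Op1 = (esr >> 14) & 0x7;
        p.CRn = (esr >> 10) & 0xf;
        p.CRm = (esr >> 1) & 0xf;
        p.Op2 = (esr >> 17) & 0x7;
        p.is_write = !(esr & 1);        /* bit 0 is the direction bit */
        return p;
}

int main(void)
{
        struct sys_reg_params p = decode_sysreg_trap(0x62300001u);

        printf("Op0=%u Op1=%u CRn=%u CRm=%u Op2=%u write=%d\n",
               p.Op0, p.Op1, p.CRn, p.CRm, p.Op2, p.is_write);
        return 0;
}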
esr 43 arch/arm64/mm/fault.c int (*fn)(unsigned long addr, unsigned int esr,
esr 53 arch/arm64/mm/fault.c static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
esr 55 arch/arm64/mm/fault.c return fault_info + (esr & ESR_ELx_FSC);
esr 58 arch/arm64/mm/fault.c static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
esr 60 arch/arm64/mm/fault.c return debug_fault_info + DBG_ESR_EVT(esr);
esr 63 arch/arm64/mm/fault.c static void data_abort_decode(unsigned int esr)
esr 67 arch/arm64/mm/fault.c if (esr & ESR_ELx_ISV) {
esr 69 arch/arm64/mm/fault.c 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
esr 71 arch/arm64/mm/fault.c (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
esr 72 arch/arm64/mm/fault.c (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
esr 74 arch/arm64/mm/fault.c (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
esr 75 arch/arm64/mm/fault.c (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
esr 77 arch/arm64/mm/fault.c pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
esr 81 arch/arm64/mm/fault.c (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
esr 82 arch/arm64/mm/fault.c (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
esr 85 arch/arm64/mm/fault.c static void mem_abort_decode(unsigned int esr)
esr 89 arch/arm64/mm/fault.c pr_alert(" ESR = 0x%08x\n", esr);
esr 91 arch/arm64/mm/fault.c ESR_ELx_EC(esr), esr_get_class_string(esr),
esr 92 arch/arm64/mm/fault.c (esr & ESR_ELx_IL) ? 32 : 16);
esr 94 arch/arm64/mm/fault.c (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
esr 95 arch/arm64/mm/fault.c (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
esr 97 arch/arm64/mm/fault.c (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
esr 98 arch/arm64/mm/fault.c (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
esr 100 arch/arm64/mm/fault.c if (esr_is_data_abort(esr))
esr 101 arch/arm64/mm/fault.c data_abort_decode(esr);
esr 230 arch/arm64/mm/fault.c static bool is_el1_instruction_abort(unsigned int esr)
esr 232 arch/arm64/mm/fault.c return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
esr 235 arch/arm64/mm/fault.c static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
esr 238 arch/arm64/mm/fault.c unsigned int ec = ESR_ELx_EC(esr);
esr 239 arch/arm64/mm/fault.c unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
esr 255 arch/arm64/mm/fault.c unsigned int esr,
esr 261 arch/arm64/mm/fault.c if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
esr 262 arch/arm64/mm/fault.c (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
esr 287 arch/arm64/mm/fault.c unsigned int esr, struct pt_regs *regs)
esr 294 arch/arm64/mm/fault.c mem_abort_decode(esr);
esr 297 arch/arm64/mm/fault.c die("Oops", regs, esr);
esr 302 arch/arm64/mm/fault.c static void __do_kernel_fault(unsigned long addr, unsigned int esr,
esr 311 arch/arm64/mm/fault.c if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
esr 314 arch/arm64/mm/fault.c if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
esr 318 arch/arm64/mm/fault.c if (is_el1_permission_fault(addr, esr, regs)) {
esr 319 arch/arm64/mm/fault.c if (esr & ESR_ELx_WNR)
esr 329 arch/arm64/mm/fault.c die_kernel_fault(msg, addr, esr, regs);
esr 332 arch/arm64/mm/fault.c static void set_thread_esr(unsigned long address, unsigned int esr)
esr 349 arch/arm64/mm/fault.c switch (ESR_ELx_EC(esr)) {
esr 360 arch/arm64/mm/fault.c esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
esr 362 arch/arm64/mm/fault.c esr |= ESR_ELx_FSC_FAULT;
esr 370 arch/arm64/mm/fault.c esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
esr 371 arch/arm64/mm/fault.c esr |= ESR_ELx_FSC_FAULT;
esr 380 arch/arm64/mm/fault.c WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
esr 381 arch/arm64/mm/fault.c esr = 0;
esr 386 arch/arm64/mm/fault.c current->thread.fault_code = esr;
esr 389 arch/arm64/mm/fault.c static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
esr 396 arch/arm64/mm/fault.c const struct fault_info *inf = esr_to_fault_info(esr);
esr 398 arch/arm64/mm/fault.c set_thread_esr(addr, esr);
esr 402 arch/arm64/mm/fault.c __do_kernel_fault(addr, esr, regs);
esr 437 arch/arm64/mm/fault.c static bool is_el0_instruction_abort(unsigned int esr)
esr 439 arch/arm64/mm/fault.c return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
esr 446 arch/arm64/mm/fault.c static bool is_write_abort(unsigned int esr)
esr 448 arch/arm64/mm/fault.c return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
esr 451 arch/arm64/mm/fault.c static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
esr 460 arch/arm64/mm/fault.c if (kprobe_page_fault(regs, esr))
esr 473 arch/arm64/mm/fault.c if (is_el0_instruction_abort(esr)) {
esr 476 arch/arm64/mm/fault.c } else if (is_write_abort(esr)) {
esr 481 arch/arm64/mm/fault.c if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
esr 485 arch/arm64/mm/fault.c addr, esr, regs);
esr 487 arch/arm64/mm/fault.c if (is_el1_instruction_abort(esr))
esr 489 arch/arm64/mm/fault.c addr, esr, regs);
esr 493 arch/arm64/mm/fault.c addr, esr, regs);
esr 591 arch/arm64/mm/fault.c inf = esr_to_fault_info(esr);
esr 592 arch/arm64/mm/fault.c set_thread_esr(addr, esr);
esr 623 arch/arm64/mm/fault.c __do_kernel_fault(addr, esr, regs);
esr 628 arch/arm64/mm/fault.c unsigned int esr,
esr 632 arch/arm64/mm/fault.c return do_page_fault(addr, esr, regs);
esr 634 arch/arm64/mm/fault.c do_bad_area(addr, esr, regs);
esr 638 arch/arm64/mm/fault.c static int do_alignment_fault(unsigned long addr, unsigned int esr,
esr 641 arch/arm64/mm/fault.c do_bad_area(addr, esr, regs);
esr 645 arch/arm64/mm/fault.c static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
esr 650 arch/arm64/mm/fault.c static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
esr 655 arch/arm64/mm/fault.c inf = esr_to_fault_info(esr);
esr 663 arch/arm64/mm/fault.c if (esr & ESR_ELx_FnV)
esr 667 arch/arm64/mm/fault.c arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
esr 739 arch/arm64/mm/fault.c asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
esr 742 arch/arm64/mm/fault.c const struct fault_info *inf = esr_to_fault_info(esr);
esr 744 arch/arm64/mm/fault.c if (!inf->fn(addr, esr, regs))
esr 749 arch/arm64/mm/fault.c mem_abort_decode(esr);
esr 754 arch/arm64/mm/fault.c inf->sig, inf->code, (void __user *)addr, esr);
esr 764 arch/arm64/mm/fault.c unsigned int esr,
esr 776 arch/arm64/mm/fault.c do_mem_abort(addr, esr, regs);
esr 781 arch/arm64/mm/fault.c unsigned int esr,
esr 791 arch/arm64/mm/fault.c SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
esr 794 arch/arm64/mm/fault.c int __init early_brk64(unsigned long addr, unsigned int esr,
esr 903 arch/arm64/mm/fault.c unsigned int esr,
esr 906 arch/arm64/mm/fault.c const struct fault_info *inf = esr_to_debug_fault_info(esr);
esr 917 arch/arm64/mm/fault.c if (inf->fn(addr_if_watchpoint, esr, regs)) {
esr 919 arch/arm64/mm/fault.c inf->sig, inf->code, (void __user *)pc, esr);
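do_mem_abort() above dispatches through a fault_info table indexed by the fault status code in the low ESR bits. A sketch of that table lookup, assuming a 6-bit FSC field (mask 0x3F) and using a GNU C range initializer as the kernel's own table does; the handlers and the FSC slot chosen are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_FSC 0x3Fu       /* fault status code: low 6 bits of the ESR */

struct fault_info {             /* simplified after the listing */
        int (*fn)(unsigned long addr, uint32_t esr);
        const char *name;
};

static int do_bad(unsigned long addr, uint32_t esr)
{
        return 1;               /* unhandled: caller escalates */
}

static int do_translation_fault(unsigned long addr, uint32_t esr)
{
        printf("translation fault at %#lx, ESR=0x%08x\n", addr, esr);
        return 0;               /* handled */
}

/* One slot per FSC value; most slots stay do_bad in this sketch. */
static struct fault_info fault_info[64] = {
        [0 ... 63] = { do_bad, "unknown" },
        [0x04]     = { do_translation_fault, "level 0 translation fault" },
};

static const struct fault_info *esr_to_fault_info(uint32_t esr)
{
        return fault_info + (esr & ESR_ELx_FSC);
}

static void do_mem_abort(unsigned long addr, uint32_t esr)
{
        const struct fault_info *inf = esr_to_fault_info(esr);

        if (inf->fn(addr, esr))
                printf("unhandled %s\n", inf->name);
}

int main(void)
{
        do_mem_abort(0xdead000, 0x96000004u);   /* FSC = 0x04 */
        return 0;
}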
esr 58 arch/microblaze/include/asm/thread_info.h __u32 esr;
esr 53 arch/microblaze/include/uapi/asm/ptrace.h microblaze_reg_t esr;
esr 27 arch/microblaze/kernel/asm-offsets.c DEFINE(PT_ESR, offsetof(struct pt_regs, esr));
esr 123 arch/microblaze/kernel/asm-offsets.c DEFINE(CC_ESR, offsetof(struct cpu_context, esr));
esr 79 arch/microblaze/kernel/exceptions.c (unsigned int) regs->pc, (unsigned int) regs->esr);
esr 47 arch/microblaze/kernel/process.c regs->msr, regs->ear, regs->esr, regs->fsr);
esr 75 arch/microblaze/kernel/signal.c COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
esr 136 arch/microblaze/kernel/signal.c COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
esr 97 arch/microblaze/mm/fault.c regs->esr = error_code;
esr 2888 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t esr:2;
esr 2902 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t esr:2;
esr 3483 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t esr:2;
esr 3487 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t esr:2;
esr 620 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 624 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 644 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 648 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 1561 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 1575 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 1584 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 1598 arch/mips/include/asm/octeon/cvmx-npi-defs.h uint64_t esr:2;
esr 107 arch/mips/include/asm/octeon/cvmx-sli-defs.h __BITFIELD_FIELD(uint64_t esr:2,
esr 119 arch/mips/include/asm/octeon/cvmx-sli-defs.h __BITFIELD_FIELD(uint64_t esr:2,
esr 599 arch/mips/pci/pci-octeon.c mem_access.s.esr = 1; /* Endian-Swap on read. */
esr 892 arch/mips/pci/pcie-octeon.c mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
esr 1344 arch/mips/pci/pcie-octeon.c mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
esr 972 arch/powerpc/include/asm/kvm_ppc.h SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
esr 219 arch/powerpc/include/uapi/asm/kvm.h __u32 esr;
esr 58 arch/powerpc/include/uapi/asm/kvm_para.h __u32 esr;
esr 521 arch/powerpc/kernel/kvm.c kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
esr 611 arch/powerpc/kernel/kvm.c kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
esr 1501 arch/powerpc/kvm/booke.c sregs->u.e.esr = kvmppc_get_esr(vcpu);
esr 1519 arch/powerpc/kvm/booke.c kvmppc_set_esr(vcpu, sregs->u.e.esr);
esr 130 arch/powerpc/kvm/booke_emulate.c vcpu->arch.shared->esr = spr_val;
esr 391 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.shared->esr;
esr 140 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GESR, vcpu->arch.shared->esr);
esr 164 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->esr = mfspr(SPRN_GESR);
esr 221 arch/powerpc/kvm/emulate.c vcpu->arch.shared->esr | ESR_PTR);
esr 237 arch/powerpc/kvm/emulate.c vcpu->arch.shared->esr | ESR_PTR);
esr 90 arch/sh/boards/mach-dreamcast/irq.c __u32 esr = ESR_BASE + (LEVEL(irq) << 2);
esr 92 arch/sh/boards/mach-dreamcast/irq.c outl((1 << EVENT_BIT(irq)), esr);
esr 107 arch/sh/boards/mach-dreamcast/irq.c __u32 emr, esr, status, level;
esr 124 arch/sh/boards/mach-dreamcast/irq.c esr = ESR_BASE + (level << 2);
esr 127 arch/sh/boards/mach-dreamcast/irq.c status = inl(esr);
esr 288 arch/x86/include/asm/apicdef.h } esr;
esr 213 drivers/net/can/flexcan.c u32 esr; /* 0x20 */
esr 936 drivers/net/can/flexcan.c reg_esr = priv->read(&regs->esr);
esr 941 drivers/net/can/flexcan.c priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
esr 1733 drivers/net/ethernet/dlink/dl2k.c __u16 esr;
esr 1743 drivers/net/ethernet/dlink/dl2k.c esr = mii_read (dev, phy_addr, PCS_ESR);
esr 1747 drivers/net/ethernet/dlink/dl2k.c if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
esr 1749 drivers/net/ethernet/dlink/dl2k.c if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
esr 220 drivers/net/ethernet/ibm/emac/mal.c u32 esr = get_mal_dcrn(mal, MAL_ESR);
esr 223 drivers/net/ethernet/ibm/emac/mal.c set_mal_dcrn(mal, MAL_ESR, esr);
esr 225 drivers/net/ethernet/ibm/emac/mal.c MAL_DBG(mal, "SERR %08x" NL, esr);
esr 227 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_EVB) {
esr 228 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_DE) {
esr 235 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_PEIN) {
esr 243 drivers/net/ethernet/ibm/emac/mal.c mal->index, esr);
esr 253 drivers/net/ethernet/ibm/emac/mal.c mal->index, esr);
esr 353 drivers/net/ethernet/ibm/emac/mal.c u32 esr = get_mal_dcrn(mal, MAL_ESR);
esr 355 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_EVB) {
esr 357 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_DE) {
esr 358 drivers/net/ethernet/ibm/emac/mal.c if (esr & MAL_ESR_CIDT)
esr 493 drivers/net/ethernet/ibm/emac/mal.c regs->esr = get_mal_dcrn(mal, MAL_ESR);
esr 289 drivers/net/ethernet/ibm/emac/mal.h u32 esr;
esr 553 drivers/net/ethernet/ibm/emac/phy.c u16 esr = phy_read(phy, MII_ESTATUS);
esr 554 drivers/net/ethernet/ibm/emac/phy.c if (esr & ESTATUS_1000_TFULL)
esr 556 drivers/net/ethernet/ibm/emac/phy.c if (esr & ESTATUS_1000_THALF)
esr 81 sound/soc/fsl/fsl_esai.c u32 esr;
esr 84 sound/soc/fsl/fsl_esai.c regmap_read(esai_priv->regmap, REG_ESAI_ESR, &esr);
esr 93 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TINIT_MASK)
esr 96 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_RFF_MASK)
esr 99 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TFE_MASK)
esr 102 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TLS_MASK)
esr 105 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TDE_MASK)
esr 108 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TED_MASK)
esr 111 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_TD_MASK)
esr 114 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_RLS_MASK)
esr 117 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_RDE_MASK)
esr 120 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_RED_MASK)
esr 123 sound/soc/fsl/fsl_esai.c if (esr & ESAI_ESR_RD_MASK)
esr 219 tools/arch/powerpc/include/uapi/asm/kvm.h __u32 esr;
esr 444 virt/kvm/arm/hyp/vgic-v3-sr.c u32 esr = kvm_vcpu_get_hsr(vcpu);
esr 445 virt/kvm/arm/hyp/vgic-v3-sr.c u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
esr 1004 virt/kvm/arm/hyp/vgic-v3-sr.c u32 esr;
esr 1010 virt/kvm/arm/hyp/vgic-v3-sr.c esr = kvm_vcpu_get_hsr(vcpu);
esr 1017 virt/kvm/arm/hyp/vgic-v3-sr.c sysreg = esr_cp15_to_sysreg(esr);
esr 1019 virt/kvm/arm/hyp/vgic-v3-sr.c sysreg = esr_sys64_to_sysreg(esr);
esr 1022 virt/kvm/arm/hyp/vgic-v3-sr.c is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
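Outside the arm64 exception path, most of the remaining hits use esr for device error/status registers (Octeon, EMAC MAL, FlexCAN, ESAI) and decode them the same way: snapshot the register once, then test named bit masks. A sketch of that pattern; the DEMO_* masks and messages are invented for illustration, not the fsl_esai driver's actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical status bits; a real driver takes these from its datasheet. */
#define DEMO_ESR_TX_UNDERRUN (1u << 0)
#define DEMO_ESR_RX_OVERRUN  (1u << 1)
#define DEMO_ESR_FRAME_SYNC  (1u << 2)

/* Decode one snapshot of a device error/status register. */
static void demo_esr_decode(uint32_t esr)
{
        if (esr & DEMO_ESR_TX_UNDERRUN)
                printf("transmit underrun\n");
        if (esr & DEMO_ESR_RX_OVERRUN)
                printf("receive overrun\n");
        if (esr & DEMO_ESR_FRAME_SYNC)
                printf("frame sync error\n");
}

int main(void)
{
        demo_esr_decode(0x5);   /* underrun + frame sync */
        return 0;
}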