er                 90 arch/arm/mach-ux500/pm.c 	u32 er; /* Enable register */
er                 97 arch/arm/mach-ux500/pm.c 		er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
er                 99 arch/arm/mach-ux500/pm.c 		if (pr & er)
er                144 arch/arm/mach-ux500/pm.c 	u32 er; /* Enable register */
er                149 arch/arm/mach-ux500/pm.c 		er = readl_relaxed(dist_base +
er                151 arch/arm/mach-ux500/pm.c 		writel(er, PRCM_ARMITMSK31TO0 + i * 4);
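
In the arch/arm/mach-ux500/pm.c hits above, er is a GIC distributor enable register: the suspend code checks which enabled interrupts are pending (pr & er) and mirrors the enable banks into the PRCMU wakeup mask. A minimal userspace sketch of that copy, with readl_relaxed()/writel() stubbed and illustrative offsets rather than the real ux500 register map:

    #include <stdint.h>

    static uint32_t fake_regs[64];                 /* stand-in for MMIO space */

    static uint32_t readl_relaxed(volatile uint32_t *addr) { return *addr; }
    static void writel(uint32_t val, volatile uint32_t *addr) { *addr = val; }

    #define GIC_DIST_ENABLE_SET 0   /* hypothetical offsets into fake_regs */
    #define PRCM_ARMITMSK31TO0  32
    #define GIC_BANKS           4   /* 128 interrupts -> four 32-bit banks */

    static void copy_gic_enables_to_wakeup_mask(void)
    {
        uint32_t er;    /* enable register, as in the original */
        int i;

        for (i = 0; i < GIC_BANKS; i++) {
            er = readl_relaxed(&fake_regs[GIC_DIST_ENABLE_SET + i]);
            writel(er, &fake_regs[PRCM_ARMITMSK31TO0 + i]);
        }
    }

    int main(void) { copy_gic_enables_to_wakeup_mask(); return 0; }
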
er                949 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er                963 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er                966 arch/mips/kvm/emulate.c 	return er;
er               1269 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               1278 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
er               1279 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1280 arch/mips/kvm/emulate.c 		return er;
er               1285 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbr(vcpu);
er               1288 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbwi(vcpu);
er               1291 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbwr(vcpu);
er               1294 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbp(vcpu);
er               1300 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_eret(vcpu);
er               1303 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_wait(vcpu);
er               1306 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_hypcall(vcpu, inst);
er               1362 arch/mips/kvm/emulate.c 				er = EMULATE_FAIL;
er               1534 arch/mips/kvm/emulate.c 			er = EMULATE_FAIL;
er               1567 arch/mips/kvm/emulate.c 					er = EMULATE_FAIL;
er               1578 arch/mips/kvm/emulate.c 			er = EMULATE_FAIL;
er               1585 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1595 arch/mips/kvm/emulate.c 	return er;
er               1603 arch/mips/kvm/emulate.c 	enum emulation_result er;
er               1613 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
er               1614 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1615 arch/mips/kvm/emulate.c 		return er;
er               1684 arch/mips/kvm/emulate.c 	enum emulation_result er;
er               1697 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
er               1698 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1699 arch/mips/kvm/emulate.c 		return er;
er               1802 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               1814 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
er               1815 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1816 arch/mips/kvm/emulate.c 		return er;
er               1888 arch/mips/kvm/emulate.c 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
er               1890 arch/mips/kvm/emulate.c 		if (er != EMULATE_DONE)
er               1901 arch/mips/kvm/emulate.c 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
er               1903 arch/mips/kvm/emulate.c 		if (er != EMULATE_DONE)
er               1905 arch/mips/kvm/emulate.c 		er = kvm_mips_guest_cache_op(protected_flush_icache_line,
er               1907 arch/mips/kvm/emulate.c 		if (er != EMULATE_DONE)
er               1917 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               1922 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               1925 arch/mips/kvm/emulate.c 	if (er == EMULATE_EXCEPT)
er               1926 arch/mips/kvm/emulate.c 		er = EMULATE_DONE;
er               1928 arch/mips/kvm/emulate.c 	return er;
er               1936 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               1948 arch/mips/kvm/emulate.c 		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
er               1955 arch/mips/kvm/emulate.c 		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
er               1963 arch/mips/kvm/emulate.c 			er = kvm_mips_emulate_cache(inst, opc, cause, run,
er               1977 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               1981 arch/mips/kvm/emulate.c 	return er;
er               2008 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2030 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2033 arch/mips/kvm/emulate.c 	return er;
er               2281 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2303 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2306 arch/mips/kvm/emulate.c 	return er;
er               2316 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2338 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2341 arch/mips/kvm/emulate.c 	return er;
er               2351 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2373 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2376 arch/mips/kvm/emulate.c 	return er;
er               2386 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2408 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2411 arch/mips/kvm/emulate.c 	return er;
er               2421 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2443 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2446 arch/mips/kvm/emulate.c 	return er;
er               2456 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2478 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2481 arch/mips/kvm/emulate.c 	return er;
er               2490 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2500 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
er               2501 arch/mips/kvm/emulate.c 	if (er == EMULATE_FAIL)
er               2502 arch/mips/kvm/emulate.c 		return er;
er               2581 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2585 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2620 arch/mips/kvm/emulate.c 	return er;
er               2631 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2655 arch/mips/kvm/emulate.c 		er = EMULATE_FAIL;
er               2658 arch/mips/kvm/emulate.c 	return er;
er               2666 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2686 arch/mips/kvm/emulate.c 				er = EMULATE_PRIV_FAIL;
er               2702 arch/mips/kvm/emulate.c 				er = EMULATE_PRIV_FAIL;
er               2716 arch/mips/kvm/emulate.c 				er = EMULATE_PRIV_FAIL;
er               2727 arch/mips/kvm/emulate.c 			er = EMULATE_PRIV_FAIL;
er               2736 arch/mips/kvm/emulate.c 			er = EMULATE_PRIV_FAIL;
er               2739 arch/mips/kvm/emulate.c 			er = EMULATE_PRIV_FAIL;
er               2744 arch/mips/kvm/emulate.c 	if (er == EMULATE_PRIV_FAIL)
er               2747 arch/mips/kvm/emulate.c 	return er;
er               2763 arch/mips/kvm/emulate.c 	enum emulation_result er = EMULATE_DONE;
er               2783 arch/mips/kvm/emulate.c 			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
er               2785 arch/mips/kvm/emulate.c 			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
er               2789 arch/mips/kvm/emulate.c 			er = EMULATE_FAIL;
er               2800 arch/mips/kvm/emulate.c 				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
er               2803 arch/mips/kvm/emulate.c 				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
er               2808 arch/mips/kvm/emulate.c 				er = EMULATE_FAIL;
er               2822 arch/mips/kvm/emulate.c 				er = EMULATE_FAIL;
er               2827 arch/mips/kvm/emulate.c 	return er;
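
Nearly every arch/mips/kvm hit uses er as an enum emulation_result accumulator: advance the guest PC first, bail out on EMULATE_FAIL, then dispatch to a per-instruction helper and return whatever it produced. A compilable sketch of that control flow, with trivial stand-in helpers and an invented instruction encoding:

    #include <stdio.h>

    enum emulation_result {
        EMULATE_DONE,       /* no further action needed */
        EMULATE_FAIL,       /* cannot emulate, report failure */
        EMULATE_WAIT,       /* WAIT instruction: halt until interrupt */
    };

    static enum emulation_result update_pc(int *pc) { *pc += 4; return EMULATE_DONE; }
    static enum emulation_result emul_wait(void)    { return EMULATE_WAIT; }

    static enum emulation_result emulate_insn(int *pc, unsigned insn)
    {
        enum emulation_result er;

        er = update_pc(pc);            /* commit the PC update first */
        if (er == EMULATE_FAIL)
            return er;

        switch (insn) {
        case 0x42000020:               /* pretend this encodes WAIT */
            er = emul_wait();
            break;
        default:
            er = EMULATE_FAIL;         /* unrecognised instruction */
            break;
        }
        return er;
    }

    int main(void)
    {
        int pc = 0x1000;
        printf("er=%d pc=%#x\n", emulate_insn(&pc, 0x42000020), pc);
        return 0;
    }
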
er               1287 arch/mips/kvm/mips.c 	enum emulation_result er = EMULATE_DONE;
er               1319 arch/mips/kvm/mips.c 		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
er               1320 arch/mips/kvm/mips.c 		if (er == EMULATE_PRIV_FAIL) {
er               1322 arch/mips/kvm/mips.c 		} else if (er == EMULATE_FAIL) {
er               1444 arch/mips/kvm/mips.c 	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
er                 73 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                 84 arch/mips/kvm/trap_emul.c 			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
er                 88 arch/mips/kvm/trap_emul.c 			er = EMULATE_DONE;
er                 91 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
er                 94 arch/mips/kvm/trap_emul.c 	switch (er) {
er                122 arch/mips/kvm/trap_emul.c 	enum emulation_result er;
er                142 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
er                143 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_FAIL) {
er                155 arch/mips/kvm/trap_emul.c 	enum emulation_result er;
er                169 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
er                170 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_FAIL) {
er                255 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                279 arch/mips/kvm/trap_emul.c 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
er                280 arch/mips/kvm/trap_emul.c 		if (er == EMULATE_DONE)
er                365 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                368 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
er                369 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE)
er                383 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                386 arch/mips/kvm/trap_emul.c 	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
er                387 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE)
er                401 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                404 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
er                405 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE)
er                419 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                422 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
er                423 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE) {
er                437 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                440 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
er                441 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE) {
er                455 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                458 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
er                459 arch/mips/kvm/trap_emul.c 	if (er == EMULATE_DONE) {
er                480 arch/mips/kvm/trap_emul.c 	enum emulation_result er = EMULATE_DONE;
er                489 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
er                492 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
er                496 arch/mips/kvm/trap_emul.c 		er = EMULATE_DONE;
er                499 arch/mips/kvm/trap_emul.c 	switch (er) {
er                906 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er                916 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
er                917 arch/mips/kvm/vz.c 	if (er == EMULATE_FAIL)
er                918 arch/mips/kvm/vz.c 		return er;
er                923 arch/mips/kvm/vz.c 			er = kvm_mips_emul_wait(vcpu);
er                926 arch/mips/kvm/vz.c 			er = EMULATE_FAIL;
er                979 arch/mips/kvm/vz.c 				er = EMULATE_FAIL;
er                982 arch/mips/kvm/vz.c 			if (er != EMULATE_FAIL) {
er               1043 arch/mips/kvm/vz.c 				er = EMULATE_FAIL;
er               1048 arch/mips/kvm/vz.c 			er = EMULATE_FAIL;
er               1053 arch/mips/kvm/vz.c 	if (er == EMULATE_FAIL) {
er               1060 arch/mips/kvm/vz.c 	return er;
er               1068 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1079 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
er               1080 arch/mips/kvm/vz.c 	if (er == EMULATE_FAIL)
er               1081 arch/mips/kvm/vz.c 		return er;
er               1135 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1153 arch/mips/kvm/vz.c 		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
er               1158 arch/mips/kvm/vz.c 		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
er               1166 arch/mips/kvm/vz.c 			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
er               1191 arch/mips/kvm/vz.c 			er = update_pc(vcpu, cause);
er               1203 arch/mips/kvm/vz.c 		er = EMULATE_FAIL;
er               1207 arch/mips/kvm/vz.c 	return er;
er               1213 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1322 arch/mips/kvm/vz.c 			er = EMULATE_FAIL;
er               1325 arch/mips/kvm/vz.c 		if (er != EMULATE_FAIL)
er               1326 arch/mips/kvm/vz.c 			er = update_pc(vcpu, cause);
er               1330 arch/mips/kvm/vz.c 		er = EMULATE_FAIL;
er               1333 arch/mips/kvm/vz.c 	return er;
er               1351 arch/mips/kvm/vz.c 	enum emulation_result er;
er               1367 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
er               1368 arch/mips/kvm/vz.c 	if (er == EMULATE_FAIL)
er               1369 arch/mips/kvm/vz.c 		return er;
er               1371 arch/mips/kvm/vz.c 	er = kvm_mips_emul_hypcall(vcpu, inst);
er               1372 arch/mips/kvm/vz.c 	if (er == EMULATE_FAIL)
er               1375 arch/mips/kvm/vz.c 	return er;
er               1402 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1411 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
er               1415 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
er               1419 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
er               1423 arch/mips/kvm/vz.c 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
er               1428 arch/mips/kvm/vz.c 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
er               1433 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
er               1437 arch/mips/kvm/vz.c 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
er               1442 arch/mips/kvm/vz.c 		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
er               1448 arch/mips/kvm/vz.c 	if (er == EMULATE_DONE) {
er               1450 arch/mips/kvm/vz.c 	} else if (er == EMULATE_HYPERCALL) {
er               1470 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_FAIL;
er               1486 arch/mips/kvm/vz.c 		er = EMULATE_DONE;
er               1490 arch/mips/kvm/vz.c 	switch (er) {
er               1543 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1563 arch/mips/kvm/vz.c 		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
er               1564 arch/mips/kvm/vz.c 		if (er == EMULATE_FAIL) {
er               1571 arch/mips/kvm/vz.c 	if (er == EMULATE_DONE) {
er               1573 arch/mips/kvm/vz.c 	} else if (er == EMULATE_DO_MMIO) {
er               1590 arch/mips/kvm/vz.c 	enum emulation_result er = EMULATE_DONE;
er               1610 arch/mips/kvm/vz.c 		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
er               1611 arch/mips/kvm/vz.c 		if (er == EMULATE_FAIL) {
er               1618 arch/mips/kvm/vz.c 	if (er == EMULATE_DONE) {
er               1620 arch/mips/kvm/vz.c 	} else if (er == EMULATE_DO_MMIO) {
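
The load/store handlers directly above finish by translating er into a resume action. A sketch, assuming the usual KVM convention that EMULATE_DO_MMIO exits to userspace to complete the access; the KVM_EXIT_* numbers are illustrative stand-ins for the real uapi constants:

    enum emulation_result { EMULATE_DONE, EMULATE_FAIL, EMULATE_DO_MMIO };

    #define RESUME_GUEST 0
    #define RESUME_HOST  1

    static int resume_action(enum emulation_result er, int *exit_reason)
    {
        switch (er) {
        case EMULATE_DONE:
            return RESUME_GUEST;
        case EMULATE_DO_MMIO:
            *exit_reason = 6;   /* KVM_EXIT_MMIO-style code (assumed) */
            return RESUME_HOST; /* userspace performs the MMIO */
        default:
            *exit_reason = 17;  /* KVM_EXIT_INTERNAL_ERROR-style code */
            return RESUME_HOST;
        }
    }
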
er                956 arch/powerpc/kvm/book3s_pr.c 	enum emulation_result er = EMULATE_FAIL;
er                959 arch/powerpc/kvm/book3s_pr.c 		er = kvmppc_emulate_instruction(vcpu->run, vcpu);
er                961 arch/powerpc/kvm/book3s_pr.c 	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
er               1059 arch/powerpc/kvm/book3s_pr.c 	enum emulation_result er;
er               1091 arch/powerpc/kvm/book3s_pr.c 	er = kvmppc_emulate_instruction(run, vcpu);
er               1092 arch/powerpc/kvm/book3s_pr.c 	switch (er) {
er                805 arch/powerpc/kvm/booke.c 	enum emulation_result er;
er                807 arch/powerpc/kvm/booke.c 	er = kvmppc_emulate_instruction(run, vcpu);
er                808 arch/powerpc/kvm/booke.c 	switch (er) {
er                283 arch/powerpc/kvm/powerpc.c 	enum emulation_result er;
er                286 arch/powerpc/kvm/powerpc.c 	er = kvmppc_emulate_loadstore(vcpu);
er                287 arch/powerpc/kvm/powerpc.c 	switch (er) {
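
The powerpc callers consume the same style of enum through a switch (er). A small sketch of the book3s_pr check above, under the assumption that EMULATE_AGAIN means "nothing was consumed, re-enter the guest and retry" and is therefore treated like success:

    enum emulation_result { EMULATE_DONE, EMULATE_AGAIN, EMULATE_DO_MMIO, EMULATE_FAIL };

    /* Assumption: EMULATE_AGAIN leaves guest state untouched, so the
     * caller may simply resume the guest, same as EMULATE_DONE.
     */
    static int emulation_succeeded(enum emulation_result er)
    {
        return er == EMULATE_DONE || er == EMULATE_AGAIN;
    }
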
er                 55 arch/powerpc/platforms/4xx/uic.c 	u32 er, sr;
er                 62 arch/powerpc/platforms/4xx/uic.c 	er = mfdcr(uic->dcrbase + UIC_ER);
er                 63 arch/powerpc/platforms/4xx/uic.c 	er |= sr;
er                 64 arch/powerpc/platforms/4xx/uic.c 	mtdcr(uic->dcrbase + UIC_ER, er);
er                 73 arch/powerpc/platforms/4xx/uic.c 	u32 er;
er                 76 arch/powerpc/platforms/4xx/uic.c 	er = mfdcr(uic->dcrbase + UIC_ER);
er                 77 arch/powerpc/platforms/4xx/uic.c 	er &= ~(1 << (31 - src));
er                 78 arch/powerpc/platforms/4xx/uic.c 	mtdcr(uic->dcrbase + UIC_ER, er);
er                 98 arch/powerpc/platforms/4xx/uic.c 	u32 er, sr;
er                102 arch/powerpc/platforms/4xx/uic.c 	er = mfdcr(uic->dcrbase + UIC_ER);
er                103 arch/powerpc/platforms/4xx/uic.c 	er &= ~sr;
er                104 arch/powerpc/platforms/4xx/uic.c 	mtdcr(uic->dcrbase + UIC_ER, er);
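
In the 4xx UIC hits, er caches the UIC_ER (enable register) DCR for a read-modify-write. The 31 - src shift reflects the UIC's MSB-first (IBM) bit numbering. A sketch with mfdcr/mtdcr replaced by a plain variable:

    #include <stdint.h>

    static uint32_t uic_er;    /* stand-in for the UIC_ER DCR */

    static void uic_unmask(int src)
    {
        uint32_t er = uic_er;          /* mfdcr(dcrbase + UIC_ER) */
        er |= 1u << (31 - src);        /* bit 0 is the MSB on the UIC */
        uic_er = er;                   /* mtdcr(dcrbase + UIC_ER, er) */
    }

    static void uic_mask(int src)
    {
        uint32_t er = uic_er;
        er &= ~(1u << (31 - src));
        uic_er = er;
    }
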
er                117 arch/x86/events/core.c 	struct extra_reg *er;
er                124 arch/x86/events/core.c 	for (er = x86_pmu.extra_regs; er->msr; er++) {
er                125 arch/x86/events/core.c 		if (er->event != (config & er->config_mask))
er                127 arch/x86/events/core.c 		if (event->attr.config1 & ~er->valid_mask)
er                130 arch/x86/events/core.c 		if (!er->extra_msr_access)
er                133 arch/x86/events/core.c 		reg->idx = er->idx;
er                135 arch/x86/events/core.c 		reg->reg = er->msr;
er               4525 arch/x86/events/intel/core.c 	struct extra_reg *er;
er               5151 arch/x86/events/intel/core.c 		for (er = x86_pmu.extra_regs; er->msr; er++) {
er               5152 arch/x86/events/intel/core.c 			er->extra_msr_access = check_msr(er->msr, 0x11UL);
er               5154 arch/x86/events/intel/core.c 			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
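
The x86 perf hits walk a sentinel-terminated table of struct extra_reg, stopping at the entry whose msr is 0. A sketch of that scan; field names follow the kernel struct, but the layout here is reduced and the error handling is simplified to NULL returns:

    #include <stdint.h>
    #include <stddef.h>

    struct extra_reg {
        uint64_t event;            /* event code this entry applies to */
        uint64_t config_mask;      /* bits of config compared to .event */
        uint64_t valid_mask;       /* bits of config1 allowed to be set */
        unsigned msr;              /* 0 terminates the table */
        int      extra_msr_access; /* MSR probed OK at boot */
    };

    static const struct extra_reg *find_extra_reg(const struct extra_reg *tbl,
                                                  uint64_t config, uint64_t config1)
    {
        const struct extra_reg *er;

        for (er = tbl; er->msr; er++) {
            if (er->event != (config & er->config_mask))
                continue;                 /* entry is for another event */
            if (config1 & ~er->valid_mask)
                return NULL;              /* invalid bits requested */
            if (!er->extra_msr_access)
                return NULL;              /* MSR unusable on this CPU */
            return er;
        }
        return NULL;
    }
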
er                144 arch/x86/events/intel/uncore.c 	struct intel_uncore_extra_reg *er;
er                159 arch/x86/events/intel/uncore.c 	er = &box->shared_regs[reg1->idx];
er                160 arch/x86/events/intel/uncore.c 	raw_spin_lock_irqsave(&er->lock, flags);
er                161 arch/x86/events/intel/uncore.c 	if (!atomic_read(&er->ref) ||
er                162 arch/x86/events/intel/uncore.c 	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
er                163 arch/x86/events/intel/uncore.c 		atomic_inc(&er->ref);
er                164 arch/x86/events/intel/uncore.c 		er->config1 = reg1->config;
er                165 arch/x86/events/intel/uncore.c 		er->config2 = reg2->config;
er                168 arch/x86/events/intel/uncore.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er                181 arch/x86/events/intel/uncore.c 	struct intel_uncore_extra_reg *er;
er                195 arch/x86/events/intel/uncore.c 	er = &box->shared_regs[reg1->idx];
er                196 arch/x86/events/intel/uncore.c 	atomic_dec(&er->ref);
er                202 arch/x86/events/intel/uncore.c 	struct intel_uncore_extra_reg *er;
er                206 arch/x86/events/intel/uncore.c 	er = &box->shared_regs[idx];
er                208 arch/x86/events/intel/uncore.c 	raw_spin_lock_irqsave(&er->lock, flags);
er                209 arch/x86/events/intel/uncore.c 	config = er->config;
er                210 arch/x86/events/intel/uncore.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
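
The uncore.c hits arbitrate an extra MSR shared by all events on a box: a claim succeeds only if the register is unused or already carries an identical configuration. A sketch using C11 atomics and a spin flag in place of the kernel's atomic_t and raw_spinlock_t (initialise the flag with ATOMIC_FLAG_INIT in real code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct shared_reg {
        atomic_int  ref;        /* how many events currently use it */
        uint64_t    config;     /* value programmed into the MSR */
        atomic_flag lock;       /* stand-in for raw_spinlock_t */
    };

    static bool shared_reg_get(struct shared_reg *er, uint64_t config)
    {
        bool ok = false;

        while (atomic_flag_test_and_set(&er->lock))
            ;                              /* spin */
        if (atomic_load(&er->ref) == 0 || er->config == config) {
            atomic_fetch_add(&er->ref, 1);
            er->config = config;
            ok = true;
        }
        atomic_flag_clear(&er->lock);
        return ok;
    }

    static void shared_reg_put(struct shared_reg *er)
    {
        atomic_fetch_sub(&er->ref, 1);     /* matches the atomic_dec above */
    }
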
er                555 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_extra_reg *er;
er                561 arch/x86/events/intel/uncore_nhmex.c 		er = &box->shared_regs[idx];
er                562 arch/x86/events/intel/uncore_nhmex.c 		raw_spin_lock_irqsave(&er->lock, flags);
er                563 arch/x86/events/intel/uncore_nhmex.c 		if (!atomic_read(&er->ref) || er->config == config) {
er                564 arch/x86/events/intel/uncore_nhmex.c 			atomic_inc(&er->ref);
er                565 arch/x86/events/intel/uncore_nhmex.c 			er->config = config;
er                568 arch/x86/events/intel/uncore_nhmex.c 		raw_spin_unlock_irqrestore(&er->lock, flags);
er                586 arch/x86/events/intel/uncore_nhmex.c 	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
er                588 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_lock_irqsave(&er->lock, flags);
er                590 arch/x86/events/intel/uncore_nhmex.c 	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
er                597 arch/x86/events/intel/uncore_nhmex.c 	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
er                598 arch/x86/events/intel/uncore_nhmex.c 		atomic_add(1 << (idx * 8), &er->ref);
er                605 arch/x86/events/intel/uncore_nhmex.c 		er->config &= ~mask;
er                606 arch/x86/events/intel/uncore_nhmex.c 		er->config |= (config & mask);
er                609 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er                616 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_extra_reg *er;
er                619 arch/x86/events/intel/uncore_nhmex.c 		er = &box->shared_regs[idx];
er                620 arch/x86/events/intel/uncore_nhmex.c 		atomic_dec(&er->ref);
er                625 arch/x86/events/intel/uncore_nhmex.c 	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
er                626 arch/x86/events/intel/uncore_nhmex.c 	atomic_sub(1 << (idx * 8), &er->ref);
er                758 arch/x86/events/intel/uncore_nhmex.c static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
er                760 arch/x86/events/intel/uncore_nhmex.c 	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
er                761 arch/x86/events/intel/uncore_nhmex.c 		return er->idx;
er                762 arch/x86/events/intel/uncore_nhmex.c 	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
er                770 arch/x86/events/intel/uncore_nhmex.c 	struct extra_reg *er;
er                778 arch/x86/events/intel/uncore_nhmex.c 	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
er                779 arch/x86/events/intel/uncore_nhmex.c 		if (er->event != (event->hw.config & er->config_mask))
er                781 arch/x86/events/intel/uncore_nhmex.c 		if (event->attr.config1 & ~er->valid_mask)
er                784 arch/x86/events/intel/uncore_nhmex.c 		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
er                785 arch/x86/events/intel/uncore_nhmex.c 		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
er                789 arch/x86/events/intel/uncore_nhmex.c 		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
er                796 arch/x86/events/intel/uncore_nhmex.c 		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
er                821 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_extra_reg *er;
er                828 arch/x86/events/intel/uncore_nhmex.c 	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
er                829 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_lock_irqsave(&er->lock, flags);
er                830 arch/x86/events/intel/uncore_nhmex.c 	config = er->config;
er                831 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er                983 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_extra_reg *er;
er               1001 arch/x86/events/intel/uncore_nhmex.c 	er = &box->shared_regs[er_idx];
er               1002 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_lock_irqsave(&er->lock, flags);
er               1004 arch/x86/events/intel/uncore_nhmex.c 		if (!atomic_read(&er->ref) || er->config == reg1->config) {
er               1005 arch/x86/events/intel/uncore_nhmex.c 			atomic_inc(&er->ref);
er               1006 arch/x86/events/intel/uncore_nhmex.c 			er->config = reg1->config;
er               1015 arch/x86/events/intel/uncore_nhmex.c 		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
er               1016 arch/x86/events/intel/uncore_nhmex.c 				!((er->config ^ config1) & mask)) {
er               1017 arch/x86/events/intel/uncore_nhmex.c 			atomic_add(1 << ((idx - 2) * 8), &er->ref);
er               1018 arch/x86/events/intel/uncore_nhmex.c 			er->config &= ~mask;
er               1019 arch/x86/events/intel/uncore_nhmex.c 			er->config |= config1 & mask;
er               1023 arch/x86/events/intel/uncore_nhmex.c 		if (!atomic_read(&er->ref) ||
er               1024 arch/x86/events/intel/uncore_nhmex.c 				(er->config == (hwc->config >> 32) &&
er               1025 arch/x86/events/intel/uncore_nhmex.c 				 er->config1 == reg1->config &&
er               1026 arch/x86/events/intel/uncore_nhmex.c 				 er->config2 == reg2->config)) {
er               1027 arch/x86/events/intel/uncore_nhmex.c 			atomic_inc(&er->ref);
er               1028 arch/x86/events/intel/uncore_nhmex.c 			er->config = (hwc->config >> 32);
er               1029 arch/x86/events/intel/uncore_nhmex.c 			er->config1 = reg1->config;
er               1030 arch/x86/events/intel/uncore_nhmex.c 			er->config2 = reg2->config;
er               1034 arch/x86/events/intel/uncore_nhmex.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er               1064 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_extra_reg *er;
er               1077 arch/x86/events/intel/uncore_nhmex.c 	er = &box->shared_regs[er_idx];
er               1079 arch/x86/events/intel/uncore_nhmex.c 		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
er               1081 arch/x86/events/intel/uncore_nhmex.c 		atomic_dec(&er->ref);
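
A distinctive nhmex detail above: one atomic word packs several 8-bit reference counts, so field i is bumped with atomic_add(1 << (i * 8)) and read back with __BITS_VALUE. A sketch of that packing:

    #include <stdatomic.h>

    #define BITS_VALUE(x, i, w) (((x) >> ((i) * (w))) & ((1u << (w)) - 1))

    static atomic_uint packed_ref;   /* four 8-bit refcounts in one word */

    static unsigned ref_of(int idx)
    {
        return BITS_VALUE(atomic_load(&packed_ref), idx, 8);
    }

    static void ref_get(int idx) { atomic_fetch_add(&packed_ref, 1u << (idx * 8)); }
    static void ref_put(int idx) { atomic_fetch_sub(&packed_ref, 1u << (idx * 8)); }
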
er                844 arch/x86/events/intel/uncore_snbep.c 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
er                852 arch/x86/events/intel/uncore_snbep.c 			atomic_sub(1 << (i * 6), &er->ref);
er                862 arch/x86/events/intel/uncore_snbep.c 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
er                870 arch/x86/events/intel/uncore_snbep.c 	raw_spin_lock_irqsave(&er->lock, flags);
er                878 arch/x86/events/intel/uncore_snbep.c 		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
er                879 arch/x86/events/intel/uncore_snbep.c 		    !((reg1->config ^ er->config) & mask)) {
er                880 arch/x86/events/intel/uncore_snbep.c 			atomic_add(1 << (i * 6), &er->ref);
er                881 arch/x86/events/intel/uncore_snbep.c 			er->config &= ~mask;
er                882 arch/x86/events/intel/uncore_snbep.c 			er->config |= reg1->config & mask;
er                888 arch/x86/events/intel/uncore_snbep.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er                899 arch/x86/events/intel/uncore_snbep.c 			atomic_sub(1 << (i * 6), &er->ref);
er                929 arch/x86/events/intel/uncore_snbep.c 	struct extra_reg *er;
er                932 arch/x86/events/intel/uncore_snbep.c 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
er                933 arch/x86/events/intel/uncore_snbep.c 		if (er->event != (event->hw.config & er->config_mask))
er                935 arch/x86/events/intel/uncore_snbep.c 		idx |= er->idx;
er                993 arch/x86/events/intel/uncore_snbep.c 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
er               1004 arch/x86/events/intel/uncore_snbep.c 	raw_spin_lock_irqsave(&er->lock, flags);
er               1005 arch/x86/events/intel/uncore_snbep.c 	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
er               1006 arch/x86/events/intel/uncore_snbep.c 	    !((config1 ^ er->config) & mask)) {
er               1007 arch/x86/events/intel/uncore_snbep.c 		atomic_add(1 << (idx * 8), &er->ref);
er               1008 arch/x86/events/intel/uncore_snbep.c 		er->config &= ~mask;
er               1009 arch/x86/events/intel/uncore_snbep.c 		er->config |= config1 & mask;
er               1012 arch/x86/events/intel/uncore_snbep.c 	raw_spin_unlock_irqrestore(&er->lock, flags);
er               1034 arch/x86/events/intel/uncore_snbep.c 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
er               1039 arch/x86/events/intel/uncore_snbep.c 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
er               1613 arch/x86/events/intel/uncore_snbep.c 	struct extra_reg *er;
er               1616 arch/x86/events/intel/uncore_snbep.c 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
er               1617 arch/x86/events/intel/uncore_snbep.c 		if (er->event != (event->hw.config & er->config_mask))
er               1619 arch/x86/events/intel/uncore_snbep.c 		idx |= er->idx;
er               2041 arch/x86/events/intel/uncore_snbep.c 	struct extra_reg *er;
er               2044 arch/x86/events/intel/uncore_snbep.c 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
er               2045 arch/x86/events/intel/uncore_snbep.c 		if (er->event != (event->hw.config & er->config_mask))
er               2047 arch/x86/events/intel/uncore_snbep.c 		idx |= er->idx;
er               2596 arch/x86/events/intel/uncore_snbep.c 	struct extra_reg *er;
er               2599 arch/x86/events/intel/uncore_snbep.c 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
er               2600 arch/x86/events/intel/uncore_snbep.c 		if (er->event != (event->hw.config & er->config_mask))
er               2602 arch/x86/events/intel/uncore_snbep.c 		idx |= er->idx;
er               3489 arch/x86/events/intel/uncore_snbep.c 	struct extra_reg *er;
er               3492 arch/x86/events/intel/uncore_snbep.c 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
er               3493 arch/x86/events/intel/uncore_snbep.c 		if (er->event != (event->hw.config & er->config_mask))
er               3495 arch/x86/events/intel/uncore_snbep.c 		idx |= er->idx;
er                262 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c 	const struct nvkm_enum *er, *eu, *ec;
er                268 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c 	er = nvkm_enum_find(gf100_fifo_fault_reason, info->reason);
er                302 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c 		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
er                470 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c 	const struct nvkm_enum *er, *ee, *ec, *ea;
er                477 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c 	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
er                527 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c 		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
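
In the nouveau fifo hits, er is a looked-up nvkm_enum entry naming a fault reason, printed as er ? er->name : "" so unknown values degrade to an empty string. A runnable sketch; the two table entries are plausible fault-reason names, not a verified copy of the driver's table:

    #include <stdio.h>

    struct nvkm_enum { int value; const char *name; };

    static const struct nvkm_enum fault_reason[] = {
        { 0x00, "PDE"      },
        { 0x01, "PDE_SIZE" },
        { -1,   0          },   /* terminator */
    };

    static const struct nvkm_enum *enum_find(const struct nvkm_enum *en, int value)
    {
        for (; en->name; en++)
            if (en->value == value)
                return en;
        return 0;
    }

    int main(void)
    {
        const struct nvkm_enum *er = enum_find(fault_reason, 0x01);
        printf("reason %s\n", er ? er->name : "");
        return 0;
    }
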
er                 76 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 		u16 er;
er                195 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 		srowexp = hdcs->w - (cycles + hdcs->exp.er + 13) / ct;
er                197 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 		mnct = (hdcs->exp.er + 12 + ct - 1) / ct;
er                201 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 		srowexp = cp - hdcs->exp.er - 6 - cycles;
er                203 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 		mnct = (hdcs->exp.er + 5 + ct - 1) / ct;
er                384 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	hdcs->exp.er = 100;
er                448 drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c 	hdcs->exp.er = 96;
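
In the stv06xx_hdcs hits, er is a per-sensor exposure timing parameter (set to 100 or 96 at init) fed into ceiling divisions of the form (x + ct - 1) / ct, i.e. the DIV_ROUND_UP idiom. A sketch of the mnct computation; the +12 constant is taken from the listing, not re-derived from the sensor datasheet:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned min_ct_periods(unsigned er, unsigned ct)
    {
        return DIV_ROUND_UP(er + 12, ct);   /* mnct in the driver */
    }
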
er               3442 drivers/mtd/spi-nor/spi-nor.c 		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
er               3446 drivers/mtd/spi-nor/spi-nor.c 		half = bfpt.dwords[er->dword] >> er->shift;
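
The spi-nor hits extract 16-bit erase descriptors from the SFDP Basic Flash Parameter Table, each located by a (dword, shift) pair. A sketch, assuming the JESD216 layout where the low byte is an erase-size exponent (0 = unsupported) and the high byte the opcode; the table slots are illustrative:

    #include <stdint.h>

    struct sfdp_bfpt_erase { int dword; int shift; };

    static const struct sfdp_bfpt_erase bfpt_erases[] = {
        { 7, 0 }, { 7, 16 }, { 8, 0 }, { 8, 16 },
    };

    static void parse_erase(const uint32_t *dwords, int i,
                            uint32_t *size, uint8_t *opcode)
    {
        const struct sfdp_bfpt_erase *er = &bfpt_erases[i];
        uint16_t half = dwords[er->dword] >> er->shift;
        uint8_t n = half & 0xff;            /* erase-size exponent */

        *size = (n && n < 32) ? 1u << n : 0;
        *opcode = half >> 8;
    }
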
er                581 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
er                582 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
er                587 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	pdev->bec.txerr = er->tx_err_cnt;
er                588 drivers/net/can/usb/peak_usb/pcan_usb_fd.c 	pdev->bec.rxerr = er->rx_err_cnt;
er                557 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 				     struct pcan_usb_pro_rxstatus *er)
er                559 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	const u16 raw_status = le16_to_cpu(er->status);
er                560 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f;
er                588 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 		u32 rx_err_cnt = (le32_to_cpu(er->err_frm) & 0x00ff0000) >> 16;
er                589 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 		u32 tx_err_cnt = (le32_to_cpu(er->err_frm) & 0xff000000) >> 24;
er                663 drivers/net/can/usb/peak_usb/pcan_usb_pro.c 	peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
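
The peak_usb hits pull CAN error counters out of little-endian device words by mask and shift. A sketch using glibc's le32toh in place of le32_to_cpu:

    #include <stdint.h>
    #include <endian.h>

    static void parse_err_frm(uint32_t err_frm_le, uint8_t *rxerr, uint8_t *txerr)
    {
        uint32_t v = le32toh(err_frm_le);

        *rxerr = (v & 0x00ff0000) >> 16;   /* receive error counter */
        *txerr = (v & 0xff000000) >> 24;   /* transmit error counter */
    }
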
er               1641 drivers/net/ethernet/marvell/mv643xx_eth.c mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
er               1645 drivers/net/ethernet/marvell/mv643xx_eth.c 	er->rx_max_pending = 4096;
er               1646 drivers/net/ethernet/marvell/mv643xx_eth.c 	er->tx_max_pending = 4096;
er               1648 drivers/net/ethernet/marvell/mv643xx_eth.c 	er->rx_pending = mp->rx_ring_size;
er               1649 drivers/net/ethernet/marvell/mv643xx_eth.c 	er->tx_pending = mp->tx_ring_size;
er               1653 drivers/net/ethernet/marvell/mv643xx_eth.c mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
er               1657 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (er->rx_mini_pending || er->rx_jumbo_pending)
er               1660 drivers/net/ethernet/marvell/mv643xx_eth.c 	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
er               1661 drivers/net/ethernet/marvell/mv643xx_eth.c 	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
er               1663 drivers/net/ethernet/marvell/mv643xx_eth.c 	if (mp->tx_ring_size != er->tx_pending)
er               1665 drivers/net/ethernet/marvell/mv643xx_eth.c 			    mp->tx_ring_size, er->tx_pending);
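
The mv643xx_eth hits implement the usual ethtool ringparam pair: get reports current and maximum sizes, set rejects mini/jumbo rings and clamps the rest to hardware limits (clamp_t in the original). A sketch with an illustrative lower bound:

    struct ring_cfg { unsigned rx, tx; };

    #define RING_MAX 4096u
    #define RING_MIN 64u      /* illustrative lower bound */

    static unsigned clamp_ring(unsigned want)
    {
        if (want < RING_MIN) return RING_MIN;
        if (want > RING_MAX) return RING_MAX;
        return want;
    }

    static int set_ringparam(struct ring_cfg *cfg, unsigned rx_pending,
                             unsigned tx_pending, unsigned rx_mini, unsigned rx_jumbo)
    {
        if (rx_mini || rx_jumbo)
            return -1;                 /* -EINVAL in the driver */
        cfg->rx = rx_pending < RING_MAX ? rx_pending : RING_MAX;
        cfg->tx = clamp_ring(tx_pending);
        return 0;
    }
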
er               1682 drivers/scsi/mvumi.c 		struct mvumi_event_req *er = buffer;
er               1683 drivers/scsi/mvumi.c 		count = er->count;
er               1691 drivers/scsi/mvumi.c 			param = &er->events[i];
er                629 drivers/target/iscsi/iscsi_target_parameters.c 	struct iscsi_extra_response *er, *er_tmp;
er                631 drivers/target/iscsi/iscsi_target_parameters.c 	list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
er                633 drivers/target/iscsi/iscsi_target_parameters.c 		list_del(&er->er_list);
er                634 drivers/target/iscsi/iscsi_target_parameters.c 		kfree(er);
er               1431 drivers/target/iscsi/iscsi_target_parameters.c 	struct iscsi_extra_response *er;
er               1471 drivers/target/iscsi/iscsi_target_parameters.c 	list_for_each_entry(er, &param_list->extra_response_list, er_list) {
er               1472 drivers/target/iscsi/iscsi_target_parameters.c 		*length += sprintf(output_buf, "%s=%s", er->key, er->value);
er               1475 drivers/target/iscsi/iscsi_target_parameters.c 		pr_debug("Sending key: %s=%s\n", er->key, er->value);
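
The iscsi_target_parameters hits free a list of key=value extra responses with list_for_each_entry_safe, the delete-safe variant that caches the next pointer before each node is freed. The same idiom on a plain singly linked list:

    #include <stdlib.h>

    struct extra_response {
        char key[64], value[64];
        struct extra_response *next;
    };

    static void free_extra_responses(struct extra_response *head)
    {
        struct extra_response *er, *er_tmp;

        for (er = head; er; er = er_tmp) {
            er_tmp = er->next;     /* saved before er is freed */
            free(er);
        }
    }
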
er                348 fs/gfs2/xattr.c 	struct gfs2_ea_request *er = ei->ei_er;
er                374 fs/gfs2/xattr.c 	if (er->er_data_len) {
er                375 fs/gfs2/xattr.c 		if (ei->ei_size + ea_size > er->er_data_len)
er                378 fs/gfs2/xattr.c 		memcpy(er->er_data + ei->ei_size, prefix, l);
er                379 fs/gfs2/xattr.c 		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
er                381 fs/gfs2/xattr.c 		er->er_data[ei->ei_size + ea_size - 1] = 0;
er                401 fs/gfs2/xattr.c 	struct gfs2_ea_request er;
er                405 fs/gfs2/xattr.c 	memset(&er, 0, sizeof(struct gfs2_ea_request));
er                407 fs/gfs2/xattr.c 		er.er_data = buffer;
er                408 fs/gfs2/xattr.c 		er.er_data_len = size;
er                416 fs/gfs2/xattr.c 		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };
er                661 fs/gfs2/xattr.c 		    struct gfs2_ea_request *er)
er                666 fs/gfs2/xattr.c 	ea->ea_data_len = cpu_to_be32(er->er_data_len);
er                667 fs/gfs2/xattr.c 	ea->ea_name_len = er->er_name_len;
er                668 fs/gfs2/xattr.c 	ea->ea_type = er->er_type;
er                671 fs/gfs2/xattr.c 	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
er                673 fs/gfs2/xattr.c 	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
er                675 fs/gfs2/xattr.c 		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
er                678 fs/gfs2/xattr.c 		const char *data = er->er_data;
er                679 fs/gfs2/xattr.c 		unsigned int data_len = er->er_data_len;
er                683 fs/gfs2/xattr.c 		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
er                721 fs/gfs2/xattr.c 				   struct gfs2_ea_request *er, void *private);
er                723 fs/gfs2/xattr.c static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
er                748 fs/gfs2/xattr.c 	error = skeleton_call(ip, er, private);
er                764 fs/gfs2/xattr.c static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
er                775 fs/gfs2/xattr.c 	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
er                793 fs/gfs2/xattr.c 	struct gfs2_ea_request er;
er                797 fs/gfs2/xattr.c 	er.er_type = type;
er                798 fs/gfs2/xattr.c 	er.er_name = name;
er                799 fs/gfs2/xattr.c 	er.er_name_len = strlen(name);
er                800 fs/gfs2/xattr.c 	er.er_data = (void *)data;
er                801 fs/gfs2/xattr.c 	er.er_data_len = size;
er                803 fs/gfs2/xattr.c 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
er                804 fs/gfs2/xattr.c 		blks += DIV_ROUND_UP(er.er_data_len, jbsize);
er                806 fs/gfs2/xattr.c 	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
er                863 fs/gfs2/xattr.c 	struct gfs2_ea_request *er = es->es_er;
er                875 fs/gfs2/xattr.c 	ea_write(ip, ea, er);
er                888 fs/gfs2/xattr.c 			       struct gfs2_ea_request *er, void *private)
er                899 fs/gfs2/xattr.c 	error = ea_write(ip, ea, er);
er                956 fs/gfs2/xattr.c static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
er               1017 fs/gfs2/xattr.c 	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
er               1033 fs/gfs2/xattr.c 	struct gfs2_ea_request er;
er               1038 fs/gfs2/xattr.c 	er.er_type = type;
er               1039 fs/gfs2/xattr.c 	er.er_name = name;
er               1040 fs/gfs2/xattr.c 	er.er_data = (void *)value;
er               1041 fs/gfs2/xattr.c 	er.er_name_len = strlen(name);
er               1042 fs/gfs2/xattr.c 	er.er_data_len = size;
er               1045 fs/gfs2/xattr.c 	es.es_er = &er;
er               1056 fs/gfs2/xattr.c 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
er               1057 fs/gfs2/xattr.c 		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
er               1059 fs/gfs2/xattr.c 	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
er                 24 fs/gfs2/xattr.h #define GFS2_EAREQ_SIZE_STUFFED(er) \
er                 25 fs/gfs2/xattr.h ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
er                 55 fs/ocfs2/xattr.c 	struct ocfs2_extent_rec		er;
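
Throughout the gfs2 hits, er is a struct gfs2_ea_request describing one xattr operation, and GFS2_EAREQ_SIZE_STUFFED computes the 8-byte-aligned size of a "stuffed" attribute (header, name and data in one block). A sketch of that size logic and the spill into extra blocks seen in the callers; the header struct here is reduced to the fields the arithmetic needs:

    #define ALIGN8(x)          (((x) + 7) & ~7UL)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct ea_header { unsigned data_len; unsigned char name_len, type; };

    static unsigned long stuffed_size(unsigned name_len, unsigned data_len)
    {
        return ALIGN8(sizeof(struct ea_header) + name_len + data_len);
    }

    static unsigned extra_blocks(unsigned name_len, unsigned data_len, unsigned jbsize)
    {
        if (stuffed_size(name_len, data_len) <= jbsize)
            return 0;                       /* fits stuffed in one block */
        return DIV_ROUND_UP(data_len, jbsize);  /* unstuffed data blocks */
    }
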
er                719 fs/ubifs/tnc_commit.c 	int i, err = 0, lnum, er;
er                724 fs/ubifs/tnc_commit.c 		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
er                727 fs/ubifs/tnc_commit.c 			err = er;
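
The ubifs hit keeps two error variables so the loop can visit every LEB even after a failure, parking the error in err while iteration continues. A sketch of that latching; whether the kernel keeps the first or the latest error is not visible in the listing, so this version keeps the latest:

    static int change_lp(int lnum) { (void)lnum; return 0; }

    static int write_index_sketch(int nlebs)
    {
        int i, err = 0, er;

        for (i = 0; i < nlebs; i++) {
            er = change_lp(i);
            if (er)
                err = er;      /* remember the error, keep going */
        }
        return err;
    }
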
er               1126 net/netrom/af_netrom.c 	int er;
er               1140 net/netrom/af_netrom.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
er               1142 net/netrom/af_netrom.c 		return er;
er               1153 net/netrom/af_netrom.c 	er = skb_copy_datagram_msg(skb, 0, msg, copied);
er               1154 net/netrom/af_netrom.c 	if (er < 0) {
er               1157 net/netrom/af_netrom.c 		return er;
er               1200 net/rose/af_rose.c 	int n, er, qbit;
er               1210 net/rose/af_rose.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
er               1211 net/rose/af_rose.c 		return er;
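
In the netrom and rose receive paths, er is an errno-style out-parameter: skb_recv_datagram() returns NULL on failure and deposits the code in *er, which recvmsg then propagates. A runnable sketch of the idiom:

    #include <stddef.h>
    #include <errno.h>

    struct skbuf { int len; };

    static struct skbuf *recv_datagram(int nonblock, int *err)
    {
        static struct skbuf one = { 42 };

        if (nonblock) {
            *err = -EWOULDBLOCK;   /* no data and caller won't wait */
            return NULL;
        }
        *err = 0;
        return &one;               /* pretend a datagram arrived */
    }

    static int recvmsg_sketch(void)
    {
        int er;
        struct skbuf *skb = recv_datagram(1, &er);

        if (skb == NULL)
            return er;             /* propagate the stored error */
        return 0;
    }

    int main(void) { return recvmsg_sketch() ? 1 : 0; }
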
er               2936 tools/perf/builtin-sched.c 	struct evsel_runtime *er;
er               2939 tools/perf/builtin-sched.c 		er = perf_evsel__get_runtime(evsel);
er               2940 tools/perf/builtin-sched.c 		if (er == NULL) {