e                 351 arch/alpha/include/asm/pgtable.h #define pte_ERROR(e) \
e                 352 arch/alpha/include/asm/pgtable.h 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
e                 353 arch/alpha/include/asm/pgtable.h #define pmd_ERROR(e) \
e                 354 arch/alpha/include/asm/pgtable.h 	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 355 arch/alpha/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 356 arch/alpha/include/asm/pgtable.h 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
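
All of the pte/pmd/pgd_ERROR() macros gathered in this listing share one shape: print the file, line, and raw bits of a corrupted page-table entry. A sketch of the usual call-site idiom (as in the generic pmd_none_or_clear_bad() helper):

	if (unlikely(pmd_bad(*pmd))) {
		pmd_ERROR(*pmd);	/* "file:line: bad pmd <bits>" */
		pmd_clear(pmd);		/* then drop the bad entry */
	}
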
e                  52 arch/alpha/oprofile/common.c 	unsigned long i, e;
e                  59 arch/alpha/oprofile/common.c 	for (i = e = 0; i < model->num_counters; ++i)
e                  61 arch/alpha/oprofile/common.c 			e |= 1 << i;
e                  62 arch/alpha/oprofile/common.c 	reg.enable = e;
e                  32 arch/arc/include/asm/disasm.h #define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))
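
The ARC BITS() macro extracts the inclusive bit range [s, e] from a word: (-2) << (e - s) has exactly e - s + 1 zero bits at the bottom, so its complement is the extraction mask. A standalone sketch (illustrative, not from the tree; the negative left shift relies on GCC's two's-complement behaviour):

#include <assert.h>

#define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))

int main(void)
{
	/* e - s = 7: (-2) << 7 clears the low 8 bits, ~ gives mask 0xff */
	assert(BITS(0xdeadbeef, 4, 11) == 0xee);	/* bits 4..11 */
	return 0;
}
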
e                  13 arch/arc/include/asm/linkage.h .macro ST2 e, o, off
e                  22 arch/arc/include/asm/linkage.h .macro LD2 e, o, off
e                 243 arch/arc/include/asm/pgtable.h #define pte_ERROR(e) \
e                 244 arch/arc/include/asm/pgtable.h 	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 245 arch/arc/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 246 arch/arc/include/asm/pgtable.h 	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  23 arch/arc/include/asm/tlbflush.h #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
e                  25 arch/arc/include/asm/tlbflush.h #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
e                  29 arch/arc/include/asm/tlbflush.h #define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
e                 423 arch/arc/mm/cache.c 	unsigned int s, e;
e                 428 arch/arc/mm/cache.c 		e = ARC_REG_IC_ENDR;
e                 431 arch/arc/mm/cache.c 		e = ARC_REG_DC_ENDR;
e                 455 arch/arc/mm/cache.c 	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
e                 265 arch/arm/include/asm/cacheflush.h #define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)
e                 271 arch/arm/include/asm/cacheflush.h #define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
e                 605 arch/arm/include/asm/tlbflush.h #define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
e                 277 arch/arm/kernel/sys_oabi-compat.c 	struct oabi_epoll_event e;
e                 296 arch/arm/kernel/sys_oabi-compat.c 		e.events = kbuf[i].events;
e                 297 arch/arm/kernel/sys_oabi-compat.c 		e.data = kbuf[i].data;
e                 298 arch/arm/kernel/sys_oabi-compat.c 		err = __copy_to_user(events, &e, sizeof(e));
e                 303 arch/arm/mm/dma-mapping.c 	struct page *page, *p, *e;
e                 313 arch/arm/mm/dma-mapping.c 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
e                 326 arch/arm/mm/dma-mapping.c 	struct page *e = page + (size >> PAGE_SHIFT);
e                 328 arch/arm/mm/dma-mapping.c 	while (page < e) {
e                1252 arch/arm/probes/kprobes/test-arm.c 	COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
e                1254 arch/arm/probes/kprobes/test-arm.c 	COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
e                 229 arch/arm64/include/asm/esr.h #define esr_sys64_to_sysreg(e)					\
e                 230 arch/arm64/include/asm/esr.h 	sys_reg((((e) & ESR_ELx_SYS64_ISS_OP0_MASK) >>		\
e                 232 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >>		\
e                 234 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >>		\
e                 236 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >>		\
e                 238 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >>		\
e                 241 arch/arm64/include/asm/esr.h #define esr_cp15_to_sysreg(e)					\
e                 243 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >>		\
e                 245 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >>		\
e                 247 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >>		\
e                 249 arch/arm64/include/asm/esr.h 		(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >>		\
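
The esr_sys64_to_sysreg()/esr_cp15_to_sysreg() macros above all follow one pattern: isolate each Op/CR field of the ESR ISS with a mask, shift it down, and re-pack the fields into a single sys_reg() encoding. A hedged sketch (the field positions here are made up for illustration, not the real ESR_ELx layout):

#include <assert.h>
#include <stdint.h>

#define GET(iss, mask, shift)	(((iss) & (mask)) >> (shift))

/* illustrative field positions only */
#define OP1_MASK	0x0001c000u
#define OP1_SHIFT	14
#define CRN_MASK	0x00003c00u
#define CRN_SHIFT	10

int main(void)
{
	uint32_t iss = (3u << OP1_SHIFT) | (7u << CRN_SHIFT);
	uint32_t enc = (GET(iss, OP1_MASK, OP1_SHIFT) << 4) |
			GET(iss, CRN_MASK, CRN_SHIFT);

	assert(enc == 0x37);	/* op1=3 re-packed above crn=7 */
	return 0;
}
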
e                  65 arch/arm64/include/asm/module.h static inline bool plt_entry_is_initialized(const struct plt_entry *e)
e                  67 arch/arm64/include/asm/module.h 	return e->adrp || e->add || e->br;
e                  37 arch/c6x/include/asm/cacheflush.h #define flush_icache_range(s, e)				  \
e                  39 arch/c6x/include/asm/cacheflush.h 		L1D_cache_block_writeback((s), (e));		  \
e                  40 arch/c6x/include/asm/cacheflush.h 		L1P_cache_block_invalidate((s), (e));		  \
e                  54 arch/c6x/include/asm/special_insns.h #define _extu(x, s, e)							\
e                  57 arch/c6x/include/asm/special_insns.h 			      "=b"(__x) : "n"(s), "n"(e), "b"(x));	\
e                  42 arch/c6x/include/asm/syscalls.h extern int sys_cache_sync(unsigned long s, unsigned long e);
e                  38 arch/c6x/kernel/sys_c6x.c asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
e                  40 arch/c6x/kernel/sys_c6x.c 	L1D_cache_block_writeback_invalidate(s, e);
e                  41 arch/c6x/kernel/sys_c6x.c 	L1P_cache_block_invalidate(s, e);
e                  34 arch/csky/include/asm/pgtable.h #define pte_ERROR(e) \
e                  35 arch/csky/include/asm/pgtable.h 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
e                  36 arch/csky/include/asm/pgtable.h #define pgd_ERROR(e) \
e                  37 arch/csky/include/asm/pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  98 arch/hexagon/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 100 arch/hexagon/include/asm/pgtable.h 		pgd_val(e))
e                   9 arch/ia64/include/asm/exception.h 				  const struct exception_table_entry *e);
e                  14 arch/ia64/include/asm/exception.h 	const struct exception_table_entry *e;				  \
e                  15 arch/ia64/include/asm/exception.h 	e = search_exception_tables((regs)->cr_iip + ia64_psr(regs)->ri); \
e                  16 arch/ia64/include/asm/exception.h 	if (e) {							  \
e                  17 arch/ia64/include/asm/exception.h 		ia64_handle_exception(regs, e);				  \
e                  39 arch/ia64/include/asm/meminit.h extern unsigned long efi_memmap_init(u64 *s, u64 *e);
e                 183 arch/ia64/include/asm/pgtable.h #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 185 arch/ia64/include/asm/pgtable.h #define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
e                 187 arch/ia64/include/asm/pgtable.h #define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 188 arch/ia64/include/asm/pgtable.h #define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
e                1049 arch/ia64/kernel/efi.c efi_memmap_init(u64 *s, u64 *e)
e                1167 arch/ia64/kernel/efi.c 	*e = (u64)++k;
e                 512 arch/ia64/kernel/module.c 	struct got_entry *got, *e;
e                 518 arch/ia64/kernel/module.c 	for (e = got; e < got + mod->arch.next_got_entry; ++e)
e                 519 arch/ia64/kernel/module.c 		if (e->val == value)
e                 523 arch/ia64/kernel/module.c 	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
e                 525 arch/ia64/kernel/module.c 	e->val = value;
e                 528 arch/ia64/kernel/module.c 	return (uint64_t) e - mod->arch.gp;
e                 108 arch/ia64/kernel/perfmon_default_smpl.c 	unsigned long *e, entry_size;
e                 138 arch/ia64/kernel/perfmon_default_smpl.c 	e = (unsigned long *)(ent+1);
e                 180 arch/ia64/kernel/perfmon_default_smpl.c 			*e++ = *val++;
e                1503 arch/ia64/kernel/unwind.c 	const struct unw_table_entry *e = NULL;
e                1509 arch/ia64/kernel/unwind.c 		e = &table->array[mid];
e                1510 arch/ia64/kernel/unwind.c 		if (rel_ip < e->start_offset)
e                1512 arch/ia64/kernel/unwind.c 		else if (rel_ip >= e->end_offset)
e                1517 arch/ia64/kernel/unwind.c 	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
e                1519 arch/ia64/kernel/unwind.c 	return e;
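
The ia64 unwind lookup above is a plain binary search over entries sorted by address range, returning the entry whose [start_offset, end_offset) interval covers rel_ip. A self-contained sketch of the same pattern:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct entry { uint64_t start, end; };	/* sorted, non-overlapping */

static const struct entry *range_lookup(const struct entry *tab, size_t n,
					uint64_t ip)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct entry *e = &tab[mid];

		if (ip < e->start)
			hi = mid;		/* go left */
		else if (ip >= e->end)
			lo = mid + 1;		/* go right */
		else
			return e;		/* start <= ip < end */
	}
	return NULL;				/* no covering entry */
}

int main(void)
{
	static const struct entry tab[] = { {0, 16}, {16, 64}, {80, 96} };

	assert(range_lookup(tab, 3, 20) == &tab[1]);
	assert(range_lookup(tab, 3, 70) == NULL);	/* falls in a gap */
	return 0;
}
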
e                1529 arch/ia64/kernel/unwind.c 	const struct unw_table_entry *e = NULL;
e                1579 arch/ia64/kernel/unwind.c 			e = lookup(table, ip - table->segment_base);
e                1584 arch/ia64/kernel/unwind.c 	if (!e) {
e                1598 arch/ia64/kernel/unwind.c 	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
e                1600 arch/ia64/kernel/unwind.c 	hdr = *(u64 *) (table->segment_base + e->info_offset);
e                1601 arch/ia64/kernel/unwind.c 	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
e                1642 arch/ia64/kernel/unwind.c 		__func__, table->segment_base + e->start_offset, sr.when_target);
e                  15 arch/ia64/mm/extable.c ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
e                  17 arch/ia64/mm/extable.c 	long fix = (u64) &e->fixup + e->fixup;
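
ia64_handle_exception() above decodes a self-relative pointer: the exception table stores target - &field, so adding the field's own address recovers the target and the table needs no load-time relocation. A standalone sketch of the trick (layout simplified):

#include <assert.h>
#include <stdint.h>

struct relentry {
	int32_t fixup;			/* stores target - &fixup */
};

static uintptr_t resolve(const struct relentry *e)
{
	return (uintptr_t)&e->fixup + e->fixup;
}

int main(void)
{
	static char target;
	static struct relentry e;	/* same image, offset fits 32 bits */

	e.fixup = (int32_t)((uintptr_t)&target - (uintptr_t)&e.fixup);
	assert(resolve(&e) == (uintptr_t)&target);
	return 0;
}
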
e                 116 arch/m68k/include/asm/bootstd.h #define _bsc5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
e                 117 arch/m68k/include/asm/bootstd.h type name(atype a, btype b, ctype c, dtype d, etype e) \
e                 124 arch/m68k/include/asm/bootstd.h    register long __e __asm__ ("%d5") = (long)e; \
e                 206 arch/m68k/include/asm/mcf_pgtable.h #define pte_ERROR(e) \
e                 208 arch/m68k/include/asm/mcf_pgtable.h 	__FILE__, __LINE__, pte_val(e))
e                 209 arch/m68k/include/asm/mcf_pgtable.h #define pmd_ERROR(e) \
e                 211 arch/m68k/include/asm/mcf_pgtable.h 	__FILE__, __LINE__, pmd_val(e))
e                 212 arch/m68k/include/asm/mcf_pgtable.h #define pgd_ERROR(e) \
e                 214 arch/m68k/include/asm/mcf_pgtable.h 	__FILE__, __LINE__, pgd_val(e))
e                 156 arch/m68k/include/asm/motorola_pgtable.h #define pte_ERROR(e) \
e                 157 arch/m68k/include/asm/motorola_pgtable.h 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 158 arch/m68k/include/asm/motorola_pgtable.h #define pmd_ERROR(e) \
e                 159 arch/m68k/include/asm/motorola_pgtable.h 	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 160 arch/m68k/include/asm/motorola_pgtable.h #define pgd_ERROR(e) \
e                 161 arch/m68k/include/asm/motorola_pgtable.h 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 154 arch/m68k/include/asm/sun3_pgtable.h #define pte_ERROR(e) \
e                 155 arch/m68k/include/asm/sun3_pgtable.h 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 156 arch/m68k/include/asm/sun3_pgtable.h #define pmd_ERROR(e) \
e                 157 arch/m68k/include/asm/sun3_pgtable.h 	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 158 arch/m68k/include/asm/sun3_pgtable.h #define pgd_ERROR(e) \
e                 159 arch/m68k/include/asm/sun3_pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 165 arch/microblaze/include/asm/pgtable.h #define pte_ERROR(e) \
e                 167 arch/microblaze/include/asm/pgtable.h 		__FILE__, __LINE__, pte_val(e))
e                 168 arch/microblaze/include/asm/pgtable.h #define pmd_ERROR(e) \
e                 170 arch/microblaze/include/asm/pgtable.h 		__FILE__, __LINE__, pmd_val(e))
e                 171 arch/microblaze/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 173 arch/microblaze/include/asm/pgtable.h 		__FILE__, __LINE__, pgd_val(e))
e                 169 arch/mips/boot/elf2ecoff.c static void convert_elf_hdr(Elf32_Ehdr * e)
e                 171 arch/mips/boot/elf2ecoff.c 	e->e_type = swab16(e->e_type);
e                 172 arch/mips/boot/elf2ecoff.c 	e->e_machine = swab16(e->e_machine);
e                 173 arch/mips/boot/elf2ecoff.c 	e->e_version = swab32(e->e_version);
e                 174 arch/mips/boot/elf2ecoff.c 	e->e_entry = swab32(e->e_entry);
e                 175 arch/mips/boot/elf2ecoff.c 	e->e_phoff = swab32(e->e_phoff);
e                 176 arch/mips/boot/elf2ecoff.c 	e->e_shoff = swab32(e->e_shoff);
e                 177 arch/mips/boot/elf2ecoff.c 	e->e_flags = swab32(e->e_flags);
e                 178 arch/mips/boot/elf2ecoff.c 	e->e_ehsize = swab16(e->e_ehsize);
e                 179 arch/mips/boot/elf2ecoff.c 	e->e_phentsize = swab16(e->e_phentsize);
e                 180 arch/mips/boot/elf2ecoff.c 	e->e_phnum = swab16(e->e_phnum);
e                 181 arch/mips/boot/elf2ecoff.c 	e->e_shentsize = swab16(e->e_shentsize);
e                 182 arch/mips/boot/elf2ecoff.c 	e->e_shnum = swab16(e->e_shnum);
e                 183 arch/mips/boot/elf2ecoff.c 	e->e_shstrndx = swab16(e->e_shstrndx);
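
convert_elf_hdr() above is a pure endianness fix-up: every multi-byte ELF header field is byte-swapped with swab16()/swab32() so a big-endian image can be processed on a little-endian build host. A userspace sketch of the same swaps (GCC builtins standing in for the kernel helpers):

#include <assert.h>
#include <stdint.h>

#define swab16(x)	__builtin_bswap16(x)
#define swab32(x)	__builtin_bswap32(x)

int main(void)
{
	uint16_t e_type = 0x0200;	/* ET_EXEC (2) as stored big-endian */
	uint32_t e_entry = 0x00001080;	/* entry point, byte-reversed */

	assert(swab16(e_type) == 0x0002);
	assert(swab32(e_entry) == 0x80100000);
	return 0;
}
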
e                 111 arch/mips/include/asm/pgtable-32.h #define pte_ERROR(e) \
e                 112 arch/mips/include/asm/pgtable-32.h 	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
e                 114 arch/mips/include/asm/pgtable-32.h #define pte_ERROR(e) \
e                 115 arch/mips/include/asm/pgtable-32.h 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 117 arch/mips/include/asm/pgtable-32.h #define pgd_ERROR(e) \
e                 118 arch/mips/include/asm/pgtable-32.h 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 161 arch/mips/include/asm/pgtable-64.h #define pte_ERROR(e) \
e                 162 arch/mips/include/asm/pgtable-64.h 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
e                 164 arch/mips/include/asm/pgtable-64.h #define pmd_ERROR(e) \
e                 165 arch/mips/include/asm/pgtable-64.h 	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 168 arch/mips/include/asm/pgtable-64.h #define pud_ERROR(e) \
e                 169 arch/mips/include/asm/pgtable-64.h 	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
e                 171 arch/mips/include/asm/pgtable-64.h #define pgd_ERROR(e) \
e                 172 arch/mips/include/asm/pgtable-64.h 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  38 arch/mips/kernel/jump_label.c void arch_jump_label_transform(struct jump_entry *e,
e                  45 arch/mips/kernel/jump_label.c 	insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
e                  48 arch/mips/kernel/jump_label.c 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
e                  52 arch/mips/kernel/jump_label.c 			offset = e->target - ((unsigned long)insn_p + 4);
e                  69 arch/mips/kernel/jump_label.c 			WARN_ON((e->target & ~J_RANGE_MASK) !=
e                  70 arch/mips/kernel/jump_label.c 				((e->code + 4) & ~J_RANGE_MASK));
e                  73 arch/mips/kernel/jump_label.c 			insn.j_format.target = e->target >> J_RANGE_SHIFT;
e                 412 arch/mips/kernel/module.c 	const struct exception_table_entry *e = NULL;
e                 417 arch/mips/kernel/module.c 		e = search_extable(dbe->dbe_start,
e                 419 arch/mips/kernel/module.c 		if (e)
e                 426 arch/mips/kernel/module.c 	return e;
e                 174 arch/mips/kernel/relocate.c 	unsigned long *etable_start, *etable_end, *e;
e                 179 arch/mips/kernel/relocate.c 	for (e = etable_start; e < etable_end; e++)
e                 180 arch/mips/kernel/relocate.c 		*e += offset;
e                 431 arch/mips/kernel/traps.c 	const struct exception_table_entry *e;
e                 433 arch/mips/kernel/traps.c 	e = search_extable(__start___dbe_table,
e                 435 arch/mips/kernel/traps.c 	if (!e)
e                 436 arch/mips/kernel/traps.c 		e = search_module_dbetables(addr);
e                 437 arch/mips/kernel/traps.c 	return e;
e                  38 arch/mips/lasat/picvue.c 	data |= picvue->e;
e                  44 arch/mips/lasat/picvue.c 	pvc_reg_write(data & ~picvue->e);
e                  52 arch/mips/lasat/picvue.c 	data |= picvue->e;
e                  56 arch/mips/lasat/picvue.c 	data &= ~picvue->e;
e                  12 arch/mips/lasat/picvue.h 	u32 e;
e                  50 arch/mips/math-emu/ieee754dp.h #define DPDNORMx(m,e) \
e                  51 arch/mips/math-emu/ieee754dp.h 	while ((m >> DP_FBITS) == 0) { m <<= 1; e--; }
e                  54 arch/mips/math-emu/ieee754sp.h #define SPDNORMx(m,e) \
e                  55 arch/mips/math-emu/ieee754sp.h 	while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
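
The DPDNORMx/SPDNORMx macros above normalize a subnormal operand: shift the mantissa left until its leading 1 reaches the implicit-bit position, decrementing the exponent once per shift so the represented value never changes. A standalone sketch with FBITS = 23 (single precision):

#include <assert.h>
#include <stdint.h>

#define FBITS	23

static void dnorm(uint32_t *m, int *e)
{
	while ((*m >> FBITS) == 0) {	/* leading 1 below the implicit bit */
		*m <<= 1;
		(*e)--;			/* keep m * 2^e constant */
	}
}

int main(void)
{
	uint32_t m = 0x1000;		/* leading 1 at bit 12 */
	int e = 0;

	dnorm(&m, &e);
	assert(m == (1u << FBITS) && e == -11);	/* 23 - 12 = 11 shifts */
	return 0;
}
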
e                  32 arch/mips/mm/uasm-micromips.c #define M(a, b, c, d, e, f)					\
e                  37 arch/mips/mm/uasm-micromips.c 	 | (e) << RE_SH						\
e                  32 arch/mips/mm/uasm-mips.c #define M(a, b, c, d, e, f)					\
e                  37 arch/mips/mm/uasm-mips.c 	 | (e) << RE_SH						\
e                  41 arch/mips/mm/uasm-mips.c #define M6(a, b, c, d, e)					\
e                  46 arch/mips/mm/uasm-mips.c 	 | (e) << FUNC_SH)
e                 128 arch/mips/sgi-ip27/ip27-init.c 	u64 p, e, n_mode;
e                 140 arch/mips/sgi-ip27/ip27-init.c 	e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
e                 143 arch/mips/sgi-ip27/ip27-init.c 	       e ? ", CPU is running" : "");
e                 146 arch/mips/sgi-ip27/ip27-init.c 	e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
e                 149 arch/mips/sgi-ip27/ip27-init.c 	       e ? ", CPU is running" : "");
e                  13 arch/nds32/include/asm/assembler.h 	setgie.e
e                  26 arch/nds32/include/asm/assembler.h 	setgie.e
e                  55 arch/nds32/mm/init.c 	unsigned long v, p, e;
e                  65 arch/nds32/mm/init.c 	e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));
e                  70 arch/nds32/mm/init.c 	while (p < e) {
e                  88 arch/nds32/mm/init.c 		for (j = 0; p < e && j < PTRS_PER_PTE;
e                 265 arch/nios2/include/asm/pgtable.h #define pte_ERROR(e) \
e                 267 arch/nios2/include/asm/pgtable.h 		__FILE__, __LINE__, pte_val(e))
e                 268 arch/nios2/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 270 arch/nios2/include/asm/pgtable.h 		__FILE__, __LINE__, pgd_val(e))
e                 404 arch/openrisc/include/asm/pgtable.h #define pte_ERROR(e) \
e                 406 arch/openrisc/include/asm/pgtable.h 	       __FILE__, __LINE__, &(e), pte_val(e))
e                 407 arch/openrisc/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 409 arch/openrisc/include/asm/pgtable.h 	       __FILE__, __LINE__, &(e), pgd_val(e))
e                  71 arch/openrisc/mm/init.c 	unsigned long v, p, e;
e                  86 arch/openrisc/mm/init.c 		e = p + (u32) region->size;
e                  91 arch/openrisc/mm/init.c 		while (p < e) {
e                 110 arch/openrisc/mm/init.c 			for (j = 0; p < e && j < PTRS_PER_PTE;
e                  66 arch/parisc/include/asm/cacheflush.h #define flush_icache_range(s,e)		do { 		\
e                  67 arch/parisc/include/asm/cacheflush.h 	flush_kernel_dcache_range_asm(s,e); 		\
e                  68 arch/parisc/include/asm/cacheflush.h 	flush_kernel_icache_range_asm(s,e); 		\
e                 102 arch/parisc/include/asm/pgtable.h #define pte_ERROR(e) \
e                 103 arch/parisc/include/asm/pgtable.h 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 104 arch/parisc/include/asm/pgtable.h #define pmd_ERROR(e) \
e                 105 arch/parisc/include/asm/pgtable.h 	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
e                 106 arch/parisc/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 107 arch/parisc/include/asm/pgtable.h 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
e                  71 arch/parisc/include/asm/psw.h 	unsigned int e:1;
e                  49 arch/parisc/kernel/unwind.c 	const struct unwind_table_entry *e = NULL;
e                  57 arch/parisc/kernel/unwind.c 		e = &table->table[mid];
e                  58 arch/parisc/kernel/unwind.c 		if (addr < e->region_start)
e                  60 arch/parisc/kernel/unwind.c 		else if (addr > e->region_end)
e                  63 arch/parisc/kernel/unwind.c 			return e;
e                  73 arch/parisc/kernel/unwind.c 	const struct unwind_table_entry *e = NULL;
e                  77 arch/parisc/kernel/unwind.c 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
e                  85 arch/parisc/kernel/unwind.c 				e = find_unwind_entry_in_table(table, addr);
e                  86 arch/parisc/kernel/unwind.c 			if (e) {
e                  95 arch/parisc/kernel/unwind.c 	return e;
e                 150 arch/parisc/kernel/unwind.c 	struct unwind_table_entry *e = (struct unwind_table_entry *)end;
e                 152 arch/parisc/kernel/unwind.c 	unwind_table_sort(s, e);
e                 268 arch/parisc/kernel/unwind.c 	const struct unwind_table_entry *e;
e                 274 arch/parisc/kernel/unwind.c 	e = find_unwind_entry(info->ip);
e                 275 arch/parisc/kernel/unwind.c 	if (e == NULL) {
e                 319 arch/parisc/kernel/unwind.c 		    e->region_start, e->region_end, e->Save_SP, e->Save_RP, 
e                 320 arch/parisc/kernel/unwind.c 		    e->Millicode, e->Total_frame_size);
e                 322 arch/parisc/kernel/unwind.c 		looking_for_rp = e->Save_RP;
e                 324 arch/parisc/kernel/unwind.c 		for (npc = e->region_start; 
e                 325 arch/parisc/kernel/unwind.c 		     (frame_size < (e->Total_frame_size << 3) || 
e                 360 arch/parisc/kernel/unwind.c 		if (frame_size > e->Total_frame_size << 3)
e                 361 arch/parisc/kernel/unwind.c 			frame_size = e->Total_frame_size << 3;
e                 363 arch/parisc/kernel/unwind.c 		if (!unwind_special(info, e->region_start, frame_size)) {
e                 365 arch/parisc/kernel/unwind.c 			if (e->Millicode)
e                 146 arch/powerpc/boot/rs6000.h     } e;
e                 147 arch/powerpc/boot/rs6000.h   } e;
e                 205 arch/powerpc/include/asm/book3s/32/pgtable.h #define pte_ERROR(e) \
e                 207 arch/powerpc/include/asm/book3s/32/pgtable.h 		(unsigned long long)pte_val(e))
e                 208 arch/powerpc/include/asm/book3s/32/pgtable.h #define pgd_ERROR(e) \
e                 209 arch/powerpc/include/asm/book3s/32/pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 334 arch/powerpc/include/asm/book3s/64/pgtable.h #define __real_pte(e, p, o)		((real_pte_t){(e)})
e                1028 arch/powerpc/include/asm/book3s/64/pgtable.h #define pte_ERROR(e) \
e                1029 arch/powerpc/include/asm/book3s/64/pgtable.h 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                1030 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_ERROR(e) \
e                1031 arch/powerpc/include/asm/book3s/64/pgtable.h 	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
e                1032 arch/powerpc/include/asm/book3s/64/pgtable.h #define pud_ERROR(e) \
e                1033 arch/powerpc/include/asm/book3s/64/pgtable.h 	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
e                1034 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_ERROR(e) \
e                1035 arch/powerpc/include/asm/book3s/64/pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  59 arch/powerpc/include/asm/nohash/32/pgtable.h #define pte_ERROR(e) \
e                  61 arch/powerpc/include/asm/nohash/32/pgtable.h 		(unsigned long long)pte_val(e))
e                  62 arch/powerpc/include/asm/nohash/32/pgtable.h #define pgd_ERROR(e) \
e                  63 arch/powerpc/include/asm/nohash/32/pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  85 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define pud_ERROR(e) \
e                  86 arch/powerpc/include/asm/nohash/64/pgtable-4k.h 	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
e                 335 arch/powerpc/include/asm/nohash/64/pgtable.h #define pte_ERROR(e) \
e                 336 arch/powerpc/include/asm/nohash/64/pgtable.h 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 337 arch/powerpc/include/asm/nohash/64/pgtable.h #define pmd_ERROR(e) \
e                 338 arch/powerpc/include/asm/nohash/64/pgtable.h 	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 339 arch/powerpc/include/asm/nohash/64/pgtable.h #define pgd_ERROR(e) \
e                 340 arch/powerpc/include/asm/nohash/64/pgtable.h 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                 274 arch/powerpc/include/uapi/asm/kvm.h 		} e;
e                1045 arch/powerpc/kvm/book3s.c static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
e                1049 arch/powerpc/kvm/book3s.c 	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
e                1496 arch/powerpc/kvm/booke.c 	sregs->u.e.features |= KVM_SREGS_E_BASE;
e                1498 arch/powerpc/kvm/booke.c 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
e                1499 arch/powerpc/kvm/booke.c 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
e                1500 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsr = vcpu->arch.mcsr;
e                1501 arch/powerpc/kvm/booke.c 	sregs->u.e.esr = kvmppc_get_esr(vcpu);
e                1502 arch/powerpc/kvm/booke.c 	sregs->u.e.dear = kvmppc_get_dar(vcpu);
e                1503 arch/powerpc/kvm/booke.c 	sregs->u.e.tsr = vcpu->arch.tsr;
e                1504 arch/powerpc/kvm/booke.c 	sregs->u.e.tcr = vcpu->arch.tcr;
e                1505 arch/powerpc/kvm/booke.c 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
e                1506 arch/powerpc/kvm/booke.c 	sregs->u.e.tb = tb;
e                1507 arch/powerpc/kvm/booke.c 	sregs->u.e.vrsave = vcpu->arch.vrsave;
e                1513 arch/powerpc/kvm/booke.c 	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
e                1516 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
e                1517 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
e                1518 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsr = sregs->u.e.mcsr;
e                1519 arch/powerpc/kvm/booke.c 	kvmppc_set_esr(vcpu, sregs->u.e.esr);
e                1520 arch/powerpc/kvm/booke.c 	kvmppc_set_dar(vcpu, sregs->u.e.dear);
e                1521 arch/powerpc/kvm/booke.c 	vcpu->arch.vrsave = sregs->u.e.vrsave;
e                1522 arch/powerpc/kvm/booke.c 	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
e                1524 arch/powerpc/kvm/booke.c 	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
e                1525 arch/powerpc/kvm/booke.c 		vcpu->arch.dec = sregs->u.e.dec;
e                1529 arch/powerpc/kvm/booke.c 	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
e                1530 arch/powerpc/kvm/booke.c 		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
e                1538 arch/powerpc/kvm/booke.c 	sregs->u.e.features |= KVM_SREGS_E_ARCH206;
e                1540 arch/powerpc/kvm/booke.c 	sregs->u.e.pir = vcpu->vcpu_id;
e                1541 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
e                1542 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
e                1543 arch/powerpc/kvm/booke.c 	sregs->u.e.decar = vcpu->arch.decar;
e                1544 arch/powerpc/kvm/booke.c 	sregs->u.e.ivpr = vcpu->arch.ivpr;
e                1550 arch/powerpc/kvm/booke.c 	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
e                1553 arch/powerpc/kvm/booke.c 	if (sregs->u.e.pir != vcpu->vcpu_id)
e                1556 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
e                1557 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
e                1558 arch/powerpc/kvm/booke.c 	vcpu->arch.decar = sregs->u.e.decar;
e                1559 arch/powerpc/kvm/booke.c 	vcpu->arch.ivpr = sregs->u.e.ivpr;
e                1566 arch/powerpc/kvm/booke.c 	sregs->u.e.features |= KVM_SREGS_E_IVOR;
e                1568 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
e                1569 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
e                1570 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
e                1571 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
e                1572 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
e                1573 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
e                1574 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
e                1575 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
e                1576 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
e                1577 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
e                1578 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
e                1579 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
e                1580 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
e                1581 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
e                1582 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
e                1583 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
e                1589 arch/powerpc/kvm/booke.c 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
e                1592 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
e                1593 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
e                1594 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
e                1595 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
e                1596 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
e                1597 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
e                1598 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
e                1599 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
e                1600 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
e                1601 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
e                1602 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
e                1603 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
e                1604 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
e                1605 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
e                1606 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
e                1607 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
e                 366 arch/powerpc/kvm/e500.c 	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
e                 368 arch/powerpc/kvm/e500.c 	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
e                 370 arch/powerpc/kvm/e500.c 	sregs->u.e.impl.fsl.features = 0;
e                 371 arch/powerpc/kvm/e500.c 	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
e                 372 arch/powerpc/kvm/e500.c 	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
e                 373 arch/powerpc/kvm/e500.c 	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
e                 375 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
e                 376 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
e                 377 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
e                 378 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[3] =
e                 392 arch/powerpc/kvm/e500.c 	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
e                 393 arch/powerpc/kvm/e500.c 		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
e                 394 arch/powerpc/kvm/e500.c 		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
e                 395 arch/powerpc/kvm/e500.c 		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
e                 402 arch/powerpc/kvm/e500.c 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
e                 405 arch/powerpc/kvm/e500.c 	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
e                 407 arch/powerpc/kvm/e500.c 			sregs->u.e.ivor_high[0];
e                 409 arch/powerpc/kvm/e500.c 			sregs->u.e.ivor_high[1];
e                 411 arch/powerpc/kvm/e500.c 			sregs->u.e.ivor_high[2];
e                 414 arch/powerpc/kvm/e500.c 	if (sregs->u.e.features & KVM_SREGS_E_PM) {
e                 416 arch/powerpc/kvm/e500.c 			sregs->u.e.ivor_high[3];
e                 573 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
e                 574 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
e                 575 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
e                 576 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
e                 577 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
e                 578 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas6 = vcpu->arch.shared->mas6;
e                 580 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
e                 581 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
e                 582 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
e                 583 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[2] = 0;
e                 584 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[3] = 0;
e                 589 arch/powerpc/kvm/e500_mmu.c 	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
e                 590 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
e                 591 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
e                 592 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
e                 593 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
e                 594 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
e                 595 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
e                 219 arch/powerpc/kvm/e500mc.c 	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
e                 221 arch/powerpc/kvm/e500mc.c 	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
e                 223 arch/powerpc/kvm/e500mc.c 	sregs->u.e.impl.fsl.features = 0;
e                 224 arch/powerpc/kvm/e500mc.c 	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
e                 225 arch/powerpc/kvm/e500mc.c 	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
e                 226 arch/powerpc/kvm/e500mc.c 	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
e                 230 arch/powerpc/kvm/e500mc.c 	sregs->u.e.ivor_high[3] =
e                 232 arch/powerpc/kvm/e500mc.c 	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
e                 233 arch/powerpc/kvm/e500mc.c 	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
e                 244 arch/powerpc/kvm/e500mc.c 	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
e                 245 arch/powerpc/kvm/e500mc.c 		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
e                 246 arch/powerpc/kvm/e500mc.c 		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
e                 247 arch/powerpc/kvm/e500mc.c 		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
e                 254 arch/powerpc/kvm/e500mc.c 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
e                 257 arch/powerpc/kvm/e500mc.c 	if (sregs->u.e.features & KVM_SREGS_E_PM) {
e                 259 arch/powerpc/kvm/e500mc.c 			sregs->u.e.ivor_high[3];
e                 262 arch/powerpc/kvm/e500mc.c 	if (sregs->u.e.features & KVM_SREGS_E_PC) {
e                 264 arch/powerpc/kvm/e500mc.c 			sregs->u.e.ivor_high[4];
e                 266 arch/powerpc/kvm/e500mc.c 			sregs->u.e.ivor_high[5];
e                1791 arch/powerpc/kvm/mpic.c static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
e                1795 arch/powerpc/kvm/mpic.c 	u32 irq = e->irqchip.pin;
e                1807 arch/powerpc/kvm/mpic.c int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
e                1819 arch/powerpc/kvm/mpic.c 	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
e                1827 arch/powerpc/kvm/mpic.c 			  struct kvm_kernel_irq_routing_entry *e,
e                1834 arch/powerpc/kvm/mpic.c 		e->set = mpic_set_irq;
e                1835 arch/powerpc/kvm/mpic.c 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
e                1836 arch/powerpc/kvm/mpic.c 		e->irqchip.pin = ue->u.irqchip.pin;
e                1837 arch/powerpc/kvm/mpic.c 		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
e                1841 arch/powerpc/kvm/mpic.c 		e->set = kvm_set_msi;
e                1842 arch/powerpc/kvm/mpic.c 		e->msi.address_lo = ue->u.msi.address_lo;
e                1843 arch/powerpc/kvm/mpic.c 		e->msi.address_hi = ue->u.msi.address_hi;
e                1844 arch/powerpc/kvm/mpic.c 		e->msi.data = ue->u.msi.data;
e                  30 arch/powerpc/lib/rheap.c static inline void fixup(unsigned long s, unsigned long e, int d,
e                  36 arch/powerpc/lib/rheap.c 	if (*pp >= s && *pp < e)
e                  40 arch/powerpc/lib/rheap.c 	if (*pp >= s && *pp < e)
e                 158 arch/powerpc/lib/rheap.c 	unsigned long s, e, bs, be;
e                 164 arch/powerpc/lib/rheap.c 	e = s + size;
e                 184 arch/powerpc/lib/rheap.c 		if (e == bs)
e                 196 arch/powerpc/lib/rheap.c 	if (after && e != after->start)
e                 332 arch/powerpc/lib/rheap.c 	unsigned long s, e, m;
e                 337 arch/powerpc/lib/rheap.c 	e = s + size;
e                 344 arch/powerpc/lib/rheap.c 	e = e & ~m;
e                 346 arch/powerpc/lib/rheap.c 	if (IS_ERR_VALUE(e) || (e < s))
e                 351 arch/powerpc/lib/rheap.c 	size = e - s;
e                 374 arch/powerpc/lib/rheap.c 	unsigned long s, e, m, bs, be;
e                 382 arch/powerpc/lib/rheap.c 	e = s + size;
e                 389 arch/powerpc/lib/rheap.c 	e = e & ~m;
e                 400 arch/powerpc/lib/rheap.c 		if (s >= bs && e <= be)
e                 409 arch/powerpc/lib/rheap.c 	if (bs == s && be == e) {
e                 417 arch/powerpc/lib/rheap.c 	if (bs == s || be == e) {
e                 428 arch/powerpc/lib/rheap.c 		newblk->start = e;
e                 429 arch/powerpc/lib/rheap.c 		newblk->size = be - e;
e                 531 arch/powerpc/lib/rheap.c 	unsigned long s, e, m, bs = 0, be = 0;
e                 539 arch/powerpc/lib/rheap.c 	e = s + size;
e                 546 arch/powerpc/lib/rheap.c 	e = e & ~m;
e                 557 arch/powerpc/lib/rheap.c 		if (s >= bs && e <= be)
e                 566 arch/powerpc/lib/rheap.c 	if (bs == s && be == e) {
e                 579 arch/powerpc/lib/rheap.c 	if (bs == s || be == e) {
e                 590 arch/powerpc/lib/rheap.c 		newblk2->start = e;
e                 591 arch/powerpc/lib/rheap.c 		newblk2->size = be - e;
e                 598 arch/powerpc/lib/rheap.c 	newblk1->size = e - s;
e                 192 arch/powerpc/mm/book3s64/slb.c 	unsigned long e, v;
e                 201 arch/powerpc/mm/book3s64/slb.c 		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
e                 203 arch/powerpc/mm/book3s64/slb.c 		slb_ptr->esid = e;
e                 212 arch/powerpc/mm/book3s64/slb.c 	unsigned long e, v;
e                 222 arch/powerpc/mm/book3s64/slb.c 		e = slb_ptr->esid;
e                 226 arch/powerpc/mm/book3s64/slb.c 		if (!e && !v)
e                 229 arch/powerpc/mm/book3s64/slb.c 		pr_err("%02d %016lx %016lx\n", i, e, v);
e                 231 arch/powerpc/mm/book3s64/slb.c 		if (!(e & SLB_ESID_V)) {
e                 238 arch/powerpc/mm/book3s64/slb.c 			       GET_ESID_1T(e),
e                 242 arch/powerpc/mm/book3s64/slb.c 			       GET_ESID(e),
e                 386 arch/powerpc/platforms/pasemi/setup.c 		unsigned long e, v;
e                 391 arch/powerpc/platforms/pasemi/setup.c 			asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
e                 393 arch/powerpc/platforms/pasemi/setup.c 			pr_err("%02d %016lx %016lx\n", i, e, v);
e                  44 arch/powerpc/platforms/powernv/opal-irqchip.c 	u64 e;
e                  46 arch/powerpc/platforms/powernv/opal-irqchip.c 	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
e                  48 arch/powerpc/platforms/powernv/opal-irqchip.c 	while (e) {
e                  51 arch/powerpc/platforms/powernv/opal-irqchip.c 		hwirq = fls64(e) - 1;
e                  52 arch/powerpc/platforms/powernv/opal-irqchip.c 		e &= ~BIT_ULL(hwirq);
e                  68 arch/powerpc/platforms/powernv/opal-irqchip.c 	e = be64_to_cpu(events) & opal_event_irqchip.mask;
e                  69 arch/powerpc/platforms/powernv/opal-irqchip.c 	if (e)
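
The OPAL event loop above services pending bits highest-first: fls64() finds the top set bit, which is cleared and dispatched, until the word drains. A userspace sketch (a builtin stands in for the kernel fls64()):

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;	/* 1-based bit index */
}

int main(void)
{
	uint64_t e = (1ull << 40) | (1ull << 7) | 1ull;

	while (e) {
		int hwirq = fls64(e) - 1;	/* highest pending event */

		e &= ~(1ull << hwirq);		/* mark handled */
		printf("handle event %d\n", hwirq);	/* 40, then 7, then 0 */
	}
	return 0;
}
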
e                  33 arch/riscv/include/asm/futex.h 	: [op] "Jr" (oparg), [e] "i" (-EFAULT)			\
e                 107 arch/riscv/include/asm/futex.h 	: [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
e                  78 arch/riscv/include/asm/pgtable-64.h #define pmd_ERROR(e) \
e                  79 arch/riscv/include/asm/pgtable-64.h 	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
e                 297 arch/riscv/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 298 arch/riscv/include/asm/pgtable.h 	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
e                  53 arch/riscv/kernel/cpu.c 	const char *e;
e                  78 arch/riscv/kernel/cpu.c 	for (e = ext; *e != '\0'; ++e) {
e                  79 arch/riscv/kernel/cpu.c 		if (isa[0] == e[0]) {
e                 208 arch/s390/include/asm/pci.h static inline void zpci_event_error(void *e) {}
e                 209 arch/s390/include/asm/pci.h static inline void zpci_event_availability(void *e) {}
e                  68 arch/s390/include/asm/pgtable.h #define pte_ERROR(e) \
e                  69 arch/s390/include/asm/pgtable.h 	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
e                  70 arch/s390/include/asm/pgtable.h #define pmd_ERROR(e) \
e                  71 arch/s390/include/asm/pgtable.h 	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
e                  72 arch/s390/include/asm/pgtable.h #define pud_ERROR(e) \
e                  73 arch/s390/include/asm/pgtable.h 	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
e                  74 arch/s390/include/asm/pgtable.h #define p4d_ERROR(e) \
e                  75 arch/s390/include/asm/pgtable.h 	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
e                  76 arch/s390/include/asm/pgtable.h #define pgd_ERROR(e) \
e                  77 arch/s390/include/asm/pgtable.h 	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
e                  20 arch/s390/include/uapi/asm/guarded_storage.h 			__u8 e	: 1;
e                  42 arch/s390/include/uapi/asm/runtime_instr.h 	__u32 e			: 1;
e                2756 arch/s390/kvm/interrupt.c static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
e                2766 arch/s390/kvm/interrupt.c 	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
e                2770 arch/s390/kvm/interrupt.c 	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
e                2819 arch/s390/kvm/interrupt.c 			  struct kvm_kernel_irq_routing_entry *e,
e                2826 arch/s390/kvm/interrupt.c 		e->set = set_adapter_int;
e                2827 arch/s390/kvm/interrupt.c 		e->adapter.summary_addr = ue->u.adapter.summary_addr;
e                2828 arch/s390/kvm/interrupt.c 		e->adapter.ind_addr = ue->u.adapter.ind_addr;
e                2829 arch/s390/kvm/interrupt.c 		e->adapter.summary_offset = ue->u.adapter.summary_offset;
e                2830 arch/s390/kvm/interrupt.c 		e->adapter.ind_offset = ue->u.adapter.ind_offset;
e                2831 arch/s390/kvm/interrupt.c 		e->adapter.adapter_id = ue->u.adapter.adapter_id;
e                2841 arch/s390/kvm/interrupt.c int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
e                  29 arch/sh/include/asm/pgtable-3level.h #define pmd_ERROR(e) \
e                  30 arch/sh/include/asm/pgtable-3level.h 	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
e                 428 arch/sh/include/asm/pgtable_32.h #define pte_ERROR(e) \
e                 430 arch/sh/include/asm/pgtable_32.h 	       &(e), (e).pte_high, (e).pte_low)
e                 431 arch/sh/include/asm/pgtable_32.h #define pgd_ERROR(e) \
e                 432 arch/sh/include/asm/pgtable_32.h 	printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
e                 434 arch/sh/include/asm/pgtable_32.h #define pte_ERROR(e) \
e                 435 arch/sh/include/asm/pgtable_32.h 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 436 arch/sh/include/asm/pgtable_32.h #define pgd_ERROR(e) \
e                 437 arch/sh/include/asm/pgtable_32.h 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  22 arch/sh/include/asm/pgtable_64.h #define pte_ERROR(e) \
e                  23 arch/sh/include/asm/pgtable_64.h 	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
e                  24 arch/sh/include/asm/pgtable_64.h #define pgd_ERROR(e) \
e                  25 arch/sh/include/asm/pgtable_64.h 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  33 arch/sparc/crypto/des_glue.c static void encrypt_to_decrypt(u64 *d, const u64 *e)
e                  35 arch/sparc/crypto/des_glue.c 	const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1;
e                 136 arch/sparc/include/asm/leon_amba.h 	struct leon3_gptimerelem_regs_map e[8];
e                  33 arch/sparc/include/asm/pgtable_32.h #define pte_ERROR(e)   __builtin_trap()
e                  34 arch/sparc/include/asm/pgtable_32.h #define pmd_ERROR(e)   __builtin_trap()
e                  35 arch/sparc/include/asm/pgtable_32.h #define pgd_ERROR(e)   __builtin_trap()
e                 101 arch/sparc/include/asm/pgtable_64.h #define pmd_ERROR(e)							\
e                 103 arch/sparc/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
e                 104 arch/sparc/include/asm/pgtable_64.h #define pud_ERROR(e)							\
e                 106 arch/sparc/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
e                 107 arch/sparc/include/asm/pgtable_64.h #define pgd_ERROR(e)							\
e                 109 arch/sparc/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
e                 267 arch/sparc/kernel/leon_kernel.c 	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
e                 268 arch/sparc/kernel/leon_kernel.c 	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
e                 269 arch/sparc/kernel/leon_kernel.c 	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
e                 271 arch/sparc/kernel/leon_kernel.c 		val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
e                 388 arch/sparc/kernel/leon_kernel.c 	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
e                 389 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
e                 391 arch/sparc/kernel/leon_kernel.c 	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
e                 398 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
e                 399 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
e                 402 arch/sparc/kernel/leon_kernel.c 			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
e                 462 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
e                 478 arch/sparc/kernel/leon_kernel.c 	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
e                 479 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
e                  68 arch/sparc/vdso/vma.c static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
e                  75 arch/sparc/vdso/vma.c 	shdrs = (void *)e->hdr + e->hdr->e_shoff;
e                  76 arch/sparc/vdso/vma.c 	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
e                  77 arch/sparc/vdso/vma.c 	for (i = 1; i < e->hdr->e_shnum; i++) {
e                  81 arch/sparc/vdso/vma.c 			return (void *)e->hdr + shdrs[i].sh_offset;
e                  89 arch/sparc/vdso/vma.c 	struct vdso_elfinfo64 *e = &_e->u.elf64;
e                  91 arch/sparc/vdso/vma.c 	e->hdr = image->data;
e                  92 arch/sparc/vdso/vma.c 	e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
e                  93 arch/sparc/vdso/vma.c 	e->dynstr = one_section64(e, ".dynstr", NULL);
e                  95 arch/sparc/vdso/vma.c 	if (!e->dynsym || !e->dynstr) {
e                 102 arch/sparc/vdso/vma.c static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
e                 106 arch/sparc/vdso/vma.c 	for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
e                 107 arch/sparc/vdso/vma.c 		Elf64_Sym *s = &e->dynsym[i];
e                 110 arch/sparc/vdso/vma.c 		if (!strcmp(e->dynstr + s->st_name, name))
e                 119 arch/sparc/vdso/vma.c 	struct vdso_elfinfo64 *e = &_e->u.elf64;
e                 120 arch/sparc/vdso/vma.c 	Elf64_Sym *osym = find_sym64(e, orig);
e                 121 arch/sparc/vdso/vma.c 	Elf64_Sym *nsym = find_sym64(e, new);
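
find_sym64()/find_sym32() above follow the standard ELF lookup: each symbol's st_name is an offset into the string table, and a linear scan compares names. A simplified standalone sketch (hand-rolled stand-ins for the Elf64_Sym types):

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct sym {
	unsigned int st_name;		/* offset into the string table */
	unsigned long st_value;
};

static const struct sym *find_sym(const struct sym *dynsym, size_t nsyms,
				  const char *dynstr, const char *name)
{
	for (size_t i = 0; i < nsyms; i++)
		if (dynsym[i].st_name &&
		    !strcmp(dynstr + dynsym[i].st_name, name))
			return &dynsym[i];
	return NULL;
}

int main(void)
{
	static const char strtab[] = "\0clock_gettime\0gettimeofday";
	static const struct sym syms[] = {
		{ 0, 0 }, { 1, 0x100 }, { 15, 0x200 },
	};

	assert(find_sym(syms, 3, strtab, "gettimeofday") == &syms[2]);
	return 0;
}
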
e                 136 arch/sparc/vdso/vma.c static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
e                 143 arch/sparc/vdso/vma.c 	shdrs = (void *)e->hdr + e->hdr->e_shoff;
e                 144 arch/sparc/vdso/vma.c 	snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
e                 145 arch/sparc/vdso/vma.c 	for (i = 1; i < e->hdr->e_shnum; i++) {
e                 149 arch/sparc/vdso/vma.c 			return (void *)e->hdr + shdrs[i].sh_offset;
e                 157 arch/sparc/vdso/vma.c 	struct vdso_elfinfo32 *e = &_e->u.elf32;
e                 159 arch/sparc/vdso/vma.c 	e->hdr = image->data;
e                 160 arch/sparc/vdso/vma.c 	e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
e                 161 arch/sparc/vdso/vma.c 	e->dynstr = one_section32(e, ".dynstr", NULL);
e                 163 arch/sparc/vdso/vma.c 	if (!e->dynsym || !e->dynstr) {
e                 170 arch/sparc/vdso/vma.c static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
e                 174 arch/sparc/vdso/vma.c 	for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
e                 175 arch/sparc/vdso/vma.c 		Elf32_Sym *s = &e->dynsym[i];
e                 178 arch/sparc/vdso/vma.c 		if (!strcmp(e->dynstr + s->st_name, name))
e                 187 arch/sparc/vdso/vma.c 	struct vdso_elfinfo32 *e = &_e->u.elf32;
e                 188 arch/sparc/vdso/vma.c 	Elf32_Sym *osym = find_sym32(e, orig);
e                 189 arch/sparc/vdso/vma.c 	Elf32_Sym *nsym = find_sym32(e, new);
e                 204 arch/sparc/vdso/vma.c static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
e                 208 arch/sparc/vdso/vma.c 		return find_sections64(image, e);
e                 210 arch/sparc/vdso/vma.c 		return find_sections32(image, e);
e                 213 arch/sparc/vdso/vma.c static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
e                 217 arch/sparc/vdso/vma.c 		return patchsym64(e, orig, new_target);
e                 219 arch/sparc/vdso/vma.c 		return patchsym32(e, orig, new_target);
e                 222 arch/sparc/vdso/vma.c static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
e                 226 arch/sparc/vdso/vma.c 	err = find_sections(image, e, elf64);
e                 230 arch/sparc/vdso/vma.c 	err = patch_one_symbol(e,
e                 236 arch/sparc/vdso/vma.c 	return patch_one_symbol(e,
e                  29 arch/um/include/asm/pgtable-2level.h #define pte_ERROR(e) \
e                  30 arch/um/include/asm/pgtable-2level.h         printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
e                  31 arch/um/include/asm/pgtable-2level.h 	       pte_val(e))
e                  32 arch/um/include/asm/pgtable-2level.h #define pgd_ERROR(e) \
e                  33 arch/um/include/asm/pgtable-2level.h         printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
e                  34 arch/um/include/asm/pgtable-2level.h 	       pgd_val(e))
e                  47 arch/um/include/asm/pgtable-3level.h #define pte_ERROR(e) \
e                  48 arch/um/include/asm/pgtable-3level.h         printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
e                  49 arch/um/include/asm/pgtable-3level.h 	       pte_val(e))
e                  50 arch/um/include/asm/pgtable-3level.h #define pmd_ERROR(e) \
e                  51 arch/um/include/asm/pgtable-3level.h         printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
e                  52 arch/um/include/asm/pgtable-3level.h 	       pmd_val(e))
e                  53 arch/um/include/asm/pgtable-3level.h #define pgd_ERROR(e) \
e                  54 arch/um/include/asm/pgtable-3level.h         printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
e                  55 arch/um/include/asm/pgtable-3level.h 	       pgd_val(e))
e                  67 arch/um/include/shared/os.h 	unsigned int e : 1;	/* O_EXCL */
e                  72 arch/um/include/shared/os.h 					  .t = 0, .a = 0, .e = 0, .cl = 0 })
e                 124 arch/um/include/shared/os.h 	flags.e = 1;
e                 194 arch/um/os-Linux/file.c 	if (flags.e)
e                 147 arch/unicore32/include/asm/cacheflush.h #define flush_icache_range(s, e)	__cpuc_coherent_kern_range(s, e)
e                 169 arch/unicore32/include/asm/tlbflush.h #define local_flush_tlb_kernel_range(s, e)	\
e                 170 arch/unicore32/include/asm/tlbflush.h 	__cpu_flush_kern_tlb_range(s, e)
e                 712 arch/x86/boot/compressed/kaslr.c 	struct efi_info *e = &boot_params->efi_info;
e                 721 arch/x86/boot/compressed/kaslr.c 	signature = (char *)&e->efi_loader_signature;
e                 728 arch/x86/boot/compressed/kaslr.c 	if (e->efi_memmap_hi) {
e                 732 arch/x86/boot/compressed/kaslr.c 	pmap =  e->efi_memmap;
e                 734 arch/x86/boot/compressed/kaslr.c 	pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
e                 737 arch/x86/boot/compressed/kaslr.c 	nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
e                 739 arch/x86/boot/compressed/kaslr.c 		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
e                 747 arch/x86/boot/compressed/kaslr.c 		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
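
The kaslr.c lines rebuild the EFI memory-map pointer from the split efi_memmap/efi_memmap_hi fields of boot_params, rejecting a non-zero high half on 32-bit builds where the map could not be addressed. A one-line sketch of the recombination (function and parameter names hypothetical):

#include <stdint.h>

/* Hedged sketch: a 64-bit address carried as two 32-bit halves, as in
 * e->efi_memmap | ((__u64)e->efi_memmap_hi << 32) above. */
static uint64_t combine_halves(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}
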
e                  41 arch/x86/boot/cpu.c 		u32 e = err_flags[i];
e                  50 arch/x86/boot/cpu.c 			if (e & 1) {
e                  58 arch/x86/boot/cpu.c 			e >>= 1;
e                  63 arch/x86/boot/cpu.c 		u32 e = err_flags[i];
e                  65 arch/x86/boot/cpu.c 			if (e & 1)
e                  67 arch/x86/boot/cpu.c 			e >>= 1;
e                  54 arch/x86/boot/early_serial_console.c 		char *e;
e                  71 arch/x86/boot/early_serial_console.c 			port = simple_strtoull(arg + pos, &e, 16);
e                  72 arch/x86/boot/early_serial_console.c 			if (port == 0 || arg + pos == e)
e                  75 arch/x86/boot/early_serial_console.c 				pos = e - arg;
e                  92 arch/x86/boot/early_serial_console.c 		baud = simple_strtoull(arg + pos, &e, 0);
e                  93 arch/x86/boot/early_serial_console.c 		if (baud == 0 || arg + pos == e)
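
early_serial_console.c validates simple_strtoull() results with the end-pointer idiom: if the end pointer still equals the start of the field, no digits were consumed and the option is malformed. A user-space sketch of the same check using strtoull (names hypothetical):

#include <stdbool.h>
#include <stdlib.h>

/* Hedged sketch of the `arg + pos == e` rejection above. */
static bool parse_hex_port(const char *s, unsigned long long *out)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 16);

	if (v == 0 || end == s)		/* zero or nothing parsed: reject */
		return false;
	*out = v;
	return true;
}
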
e                 875 arch/x86/events/core.c 	struct perf_event *e;
e                 976 arch/x86/events/core.c 			e = cpuc->event_list[i];
e                 982 arch/x86/events/core.c 			e = cpuc->event_list[i];
e                 988 arch/x86/events/core.c 				x86_pmu.put_event_constraints(cpuc, e);
e                1102 arch/x86/events/intel/lbr.c 		struct perf_branch_entry *e = &cpuc->lbr_entries[i];
e                1104 arch/x86/events/intel/lbr.c 		e->from		= lbr->lbr[i].from;
e                1105 arch/x86/events/intel/lbr.c 		e->to		= lbr->lbr[i].to;
e                1106 arch/x86/events/intel/lbr.c 		e->mispred	= !!(info & LBR_INFO_MISPRED);
e                1107 arch/x86/events/intel/lbr.c 		e->predicted	= !(info & LBR_INFO_MISPRED);
e                1108 arch/x86/events/intel/lbr.c 		e->in_tx	= !!(info & LBR_INFO_IN_TX);
e                1109 arch/x86/events/intel/lbr.c 		e->abort	= !!(info & LBR_INFO_ABORT);
e                1110 arch/x86/events/intel/lbr.c 		e->cycles	= info & LBR_INFO_CYCLES;
e                1111 arch/x86/events/intel/lbr.c 		e->reserved	= 0;
e                 101 arch/x86/events/intel/uncore_snbep.c #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
e                 102 arch/x86/events/intel/uncore_snbep.c 	.event = (e),				\
e                 280 arch/x86/events/perf_event.h #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
e                 283 arch/x86/events/perf_event.h 	.size = (e) - (c),		\
e                 300 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
e                 301 arch/x86/events/perf_event.h 	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
e                 340 arch/x86/events/perf_event.h #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
e                 341 arch/x86/events/perf_event.h 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
e                 390 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
e                 391 arch/x86/events/perf_event.h 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
e                 463 arch/x86/events/perf_event.h #define for_each_event_constraint(e, c)	\
e                 464 arch/x86/events/perf_event.h 	for ((e) = (c); (e)->weight != -1; (e)++)
e                 485 arch/x86/events/perf_event.h #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
e                 486 arch/x86/events/perf_event.h 	.event = (e),			\
e                  27 arch/x86/include/asm/asm.h #define __ASM_REG(reg)         __ASM_SEL_RAW(e##reg, r##reg)
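
__ASM_REG() pastes an e or r prefix onto a register stem so one macro yields eax on 32-bit builds and rax on 64-bit ones. A stand-alone preprocessor sketch, with SEL standing in for the kernel's selector macro:

/* Hedged sketch: SEL models __ASM_SEL_RAW, which keeps the first
 * argument on 32-bit x86 and the second on x86-64. */
#ifdef __x86_64__
#define SEL(a, b)	b
#else
#define SEL(a, b)	a
#endif
#define ASM_REG(reg)	SEL(e##reg, r##reg)
/* ASM_REG(ax) expands to eax or rax depending on the target. */
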
e                  85 arch/x86/include/asm/atomic.h 	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
e                 125 arch/x86/include/asm/atomic.h 	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
e                 139 arch/x86/include/asm/atomic.h 	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
e                  76 arch/x86/include/asm/atomic64_64.h 	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
e                 118 arch/x86/include/asm/atomic64_64.h 	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
e                 132 arch/x86/include/asm/atomic64_64.h 	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
e                 246 arch/x86/include/asm/cmpxchg.h 		     CC_SET(e)						\
e                 247 arch/x86/include/asm/cmpxchg.h 		     : CC_OUT(e) (__ret),				\
e                1606 arch/x86/include/asm/kvm_host.h void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
e                  56 arch/x86/include/asm/local.h 	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
e                  69 arch/x86/include/asm/local.h 	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
e                  82 arch/x86/include/asm/local.h 	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
e                   5 arch/x86/include/asm/pgtable-2level.h #define pte_ERROR(e) \
e                   6 arch/x86/include/asm/pgtable-2level.h 	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
e                   7 arch/x86/include/asm/pgtable-2level.h #define pgd_ERROR(e) \
e                   8 arch/x86/include/asm/pgtable-2level.h 	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
e                  14 arch/x86/include/asm/pgtable-3level.h #define pte_ERROR(e)							\
e                  16 arch/x86/include/asm/pgtable-3level.h 	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
e                  17 arch/x86/include/asm/pgtable-3level.h #define pmd_ERROR(e)							\
e                  19 arch/x86/include/asm/pgtable-3level.h 	       __FILE__, __LINE__, &(e), pmd_val(e))
e                  20 arch/x86/include/asm/pgtable-3level.h #define pgd_ERROR(e)							\
e                  22 arch/x86/include/asm/pgtable-3level.h 	       __FILE__, __LINE__, &(e), pgd_val(e))
e                  34 arch/x86/include/asm/pgtable_64.h #define pte_ERROR(e)					\
e                  36 arch/x86/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pte_val(e))
e                  37 arch/x86/include/asm/pgtable_64.h #define pmd_ERROR(e)					\
e                  39 arch/x86/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pmd_val(e))
e                  40 arch/x86/include/asm/pgtable_64.h #define pud_ERROR(e)					\
e                  42 arch/x86/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pud_val(e))
e                  45 arch/x86/include/asm/pgtable_64.h #define p4d_ERROR(e)					\
e                  47 arch/x86/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), p4d_val(e))
e                  50 arch/x86/include/asm/pgtable_64.h #define pgd_ERROR(e)					\
e                  52 arch/x86/include/asm/pgtable_64.h 	       __FILE__, __LINE__, &(e), pgd_val(e))
e                  94 arch/x86/include/asm/preempt.h 	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
e                  72 arch/x86/include/asm/refcount.h 					 r->refs.counter, e, "er", i, "cx");
e                  86 arch/x86/include/asm/refcount.h 					 r->refs.counter, e, "cx");
e                  91 arch/x86/include/asm/xen/interface_64.h     uint64_t r ## name, e ## name; \
e                 335 arch/x86/kernel/apic/io_apic.c static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
e                 339 arch/x86/kernel/apic/io_apic.c 	eu.entry = e;
e                 344 arch/x86/kernel/apic/io_apic.c static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
e                 349 arch/x86/kernel/apic/io_apic.c 	__ioapic_write_entry(apic, pin, e);
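
__ioapic_write_entry() assigns the route entry into a union so one 64-bit entry can be pushed to the chip as two 32-bit register writes. A reduced sketch of that type-punning (the real entry is a bitfield struct; this layout is hypothetical):

#include <stdint.h>

/* Hedged sketch of the eu.entry = e idiom above. */
struct route_entry {
	uint64_t bits;			/* stand-in for the real bitfields */
};

union entry_union {
	struct route_entry entry;
	struct { uint32_t w1, w2; };	/* two 32-bit register words */
};

static void write_entry(volatile uint32_t *regs, struct route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	regs[0] = eu.w1;
	regs[1] = eu.w2;
}
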
e                  75 arch/x86/kernel/cpu/microcode/amd.c 		struct equiv_cpu_entry *e = &et->entry[i];
e                  77 arch/x86/kernel/cpu/microcode/amd.c 		if (sig == e->installed_cpu)
e                  78 arch/x86/kernel/cpu/microcode/amd.c 			return e->equiv_cpu;
e                  80 arch/x86/kernel/cpu/microcode/amd.c 		e++;
e                 403 arch/x86/kernel/cpu/resctrl/internal.h static inline bool is_mbm_event(int e)
e                 405 arch/x86/kernel/cpu/resctrl/internal.h 	return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
e                 406 arch/x86/kernel/cpu/resctrl/internal.h 		e <= QOS_L3_MBM_LOCAL_EVENT_ID);
e                 155 arch/x86/kernel/early_printk.c 	char *e;
e                 163 arch/x86/kernel/early_printk.c 			early_serial_base = simple_strtoul(s, &e, 16);
e                 169 arch/x86/kernel/early_printk.c 			port = simple_strtoul(s, &e, 10);
e                 170 arch/x86/kernel/early_printk.c 			if (port > 1 || s == e)
e                 180 arch/x86/kernel/early_printk.c 		baud = simple_strtoull(s, &e, 0);
e                 182 arch/x86/kernel/early_printk.c 		if (baud == 0 || s == e)
e                 227 arch/x86/kernel/early_printk.c 	char *e;
e                 245 arch/x86/kernel/early_printk.c 	bus = (u8)simple_strtoul(s, &e, 16);
e                 246 arch/x86/kernel/early_printk.c 	s = e;
e                 250 arch/x86/kernel/early_printk.c 	slot = (u8)simple_strtoul(s, &e, 16);
e                 251 arch/x86/kernel/early_printk.c 	s = e;
e                 255 arch/x86/kernel/early_printk.c 	func = (u8)simple_strtoul(s, &e, 16);
e                 256 arch/x86/kernel/early_printk.c 	s = e;
e                 106 arch/x86/kernel/kvm.c 	struct kvm_task_sleep_node n, *e;
e                 112 arch/x86/kernel/kvm.c 	e = _find_apf_task(b, token);
e                 113 arch/x86/kernel/kvm.c 	if (e) {
e                 115 arch/x86/kernel/kvm.c 		hlist_del(&e->link);
e                 116 arch/x86/kernel/kvm.c 		kfree(e);
e                 166 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry2 *e, *entry;
e                 170 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
e                 171 arch/x86/kvm/cpuid.c 		if (e->function == 0x80000001) {
e                 172 arch/x86/kvm/cpuid.c 			entry = e;
e                 930 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
e                 935 arch/x86/kvm/cpuid.c 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
e                 940 arch/x86/kvm/cpuid.c 	} while (ej->function != e->function);
e                 949 arch/x86/kvm/cpuid.c static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
e                 952 arch/x86/kvm/cpuid.c 	if (e->function != function)
e                 954 arch/x86/kvm/cpuid.c 	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
e                 956 arch/x86/kvm/cpuid.c 	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
e                 957 arch/x86/kvm/cpuid.c 	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
e                 969 arch/x86/kvm/cpuid.c 		struct kvm_cpuid_entry2 *e;
e                 971 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
e                 972 arch/x86/kvm/cpuid.c 		if (is_matching_cpuid_entry(e, function, index)) {
e                 973 arch/x86/kvm/cpuid.c 			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
e                 975 arch/x86/kvm/cpuid.c 			best = e;
e                 369 arch/x86/kvm/hyperv.c 	struct kvm_kernel_irq_routing_entry *e;
e                 376 arch/x86/kvm/hyperv.c 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
e                 377 arch/x86/kvm/hyperv.c 			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
e                 378 arch/x86/kvm/hyperv.c 				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
e                 379 arch/x86/kvm/hyperv.c 						    e->hv_sint.sint, gsi);
e                 110 arch/x86/kvm/ioapic.c 	union kvm_ioapic_redirect_entry *e;
e                 112 arch/x86/kvm/ioapic.c 	e = &ioapic->redirtbl[RTC_GSI];
e                 113 arch/x86/kvm/ioapic.c 	if (!kvm_apic_match_dest(vcpu, NULL, 0,	e->fields.dest_id,
e                 114 arch/x86/kvm/ioapic.c 				e->fields.dest_mode))
e                 117 arch/x86/kvm/ioapic.c 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
e                 125 arch/x86/kvm/ioapic.c 		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
e                 240 arch/x86/kvm/ioapic.c 	union kvm_ioapic_redirect_entry *e;
e                 251 arch/x86/kvm/ioapic.c 		e = &ioapic->redirtbl[index];
e                 252 arch/x86/kvm/ioapic.c 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
e                 256 arch/x86/kvm/ioapic.c 			             e->fields.dest_id, e->fields.dest_mode) ||
e                 257 arch/x86/kvm/ioapic.c 			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
e                 258 arch/x86/kvm/ioapic.c 				__set_bit(e->fields.vector,
e                 277 arch/x86/kvm/ioapic.c 	union kvm_ioapic_redirect_entry *e;
e                 297 arch/x86/kvm/ioapic.c 		e = &ioapic->redirtbl[index];
e                 298 arch/x86/kvm/ioapic.c 		mask_before = e->fields.mask;
e                 300 arch/x86/kvm/ioapic.c 		old_remote_irr = e->fields.remote_irr;
e                 301 arch/x86/kvm/ioapic.c 		old_delivery_status = e->fields.delivery_status;
e                 303 arch/x86/kvm/ioapic.c 			e->bits &= 0xffffffff;
e                 304 arch/x86/kvm/ioapic.c 			e->bits |= (u64) val << 32;
e                 306 arch/x86/kvm/ioapic.c 			e->bits &= ~0xffffffffULL;
e                 307 arch/x86/kvm/ioapic.c 			e->bits |= (u32) val;
e                 309 arch/x86/kvm/ioapic.c 		e->fields.remote_irr = old_remote_irr;
e                 310 arch/x86/kvm/ioapic.c 		e->fields.delivery_status = old_delivery_status;
e                 318 arch/x86/kvm/ioapic.c 		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
e                 319 arch/x86/kvm/ioapic.c 			e->fields.remote_irr = 0;
e                 321 arch/x86/kvm/ioapic.c 		mask_after = e->fields.mask;
e                 324 arch/x86/kvm/ioapic.c 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
e                  30 arch/x86/kvm/irq_comm.c static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
e                  35 arch/x86/kvm/irq_comm.c 	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
e                  38 arch/x86/kvm/irq_comm.c static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
e                  43 arch/x86/kvm/irq_comm.c 	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
e                 104 arch/x86/kvm/irq_comm.c void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
e                 107 arch/x86/kvm/irq_comm.c 	trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
e                 108 arch/x86/kvm/irq_comm.c 	                                     (u64)e->msi.address_hi << 32 : 0),
e                 109 arch/x86/kvm/irq_comm.c 	                      e->msi.data);
e                 111 arch/x86/kvm/irq_comm.c 	irq->dest_id = (e->msi.address_lo &
e                 114 arch/x86/kvm/irq_comm.c 		irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
e                 115 arch/x86/kvm/irq_comm.c 	irq->vector = (e->msi.data &
e                 117 arch/x86/kvm/irq_comm.c 	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
e                 118 arch/x86/kvm/irq_comm.c 	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
e                 119 arch/x86/kvm/irq_comm.c 	irq->delivery_mode = e->msi.data & 0x700;
e                 120 arch/x86/kvm/irq_comm.c 	irq->msi_redir_hint = ((e->msi.address_lo
e                 128 arch/x86/kvm/irq_comm.c 		struct kvm_kernel_irq_routing_entry *e)
e                 130 arch/x86/kvm/irq_comm.c 	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
e                 133 arch/x86/kvm/irq_comm.c int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
e                 138 arch/x86/kvm/irq_comm.c 	if (kvm_msi_route_invalid(kvm, e))
e                 144 arch/x86/kvm/irq_comm.c 	kvm_set_msi_irq(kvm, e, &irq);
e                 150 arch/x86/kvm/irq_comm.c static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
e                 157 arch/x86/kvm/irq_comm.c 	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
e                 160 arch/x86/kvm/irq_comm.c int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
e                 167 arch/x86/kvm/irq_comm.c 	switch (e->type) {
e                 169 arch/x86/kvm/irq_comm.c 		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
e                 173 arch/x86/kvm/irq_comm.c 		if (kvm_msi_route_invalid(kvm, e))
e                 176 arch/x86/kvm/irq_comm.c 		kvm_set_msi_irq(kvm, e, &irq);
e                 272 arch/x86/kvm/irq_comm.c 			  struct kvm_kernel_irq_routing_entry *e,
e                 283 arch/x86/kvm/irq_comm.c 		e->irqchip.pin = ue->u.irqchip.pin;
e                 286 arch/x86/kvm/irq_comm.c 			e->irqchip.pin += PIC_NUM_PINS / 2;
e                 291 arch/x86/kvm/irq_comm.c 			e->set = kvm_set_pic_irq;
e                 296 arch/x86/kvm/irq_comm.c 			e->set = kvm_set_ioapic_irq;
e                 301 arch/x86/kvm/irq_comm.c 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
e                 304 arch/x86/kvm/irq_comm.c 		e->set = kvm_set_msi;
e                 305 arch/x86/kvm/irq_comm.c 		e->msi.address_lo = ue->u.msi.address_lo;
e                 306 arch/x86/kvm/irq_comm.c 		e->msi.address_hi = ue->u.msi.address_hi;
e                 307 arch/x86/kvm/irq_comm.c 		e->msi.data = ue->u.msi.data;
e                 309 arch/x86/kvm/irq_comm.c 		if (kvm_msi_route_invalid(kvm, e))
e                 313 arch/x86/kvm/irq_comm.c 		e->set = kvm_hv_set_sint;
e                 314 arch/x86/kvm/irq_comm.c 		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
e                 315 arch/x86/kvm/irq_comm.c 		e->hv_sint.sint = ue->u.hv_sint.sint;
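
kvm_set_msi_irq() unpacks a routing entry's MSI address/data pair; in the standard x86 layout the destination ID sits in address bits 19:12 and the vector in data bits 7:0. A reduced sketch of just those two fields (struct and function names hypothetical):

#include <stdint.h>

/* Hedged sketch of the MSI unpack above, assuming the standard
 * x86 MSI layout: dest ID in address_lo[19:12], vector in data[7:0]. */
struct msi_irq {
	uint8_t dest_id;
	uint8_t vector;
};

static struct msi_irq msi_decode(uint32_t address_lo, uint32_t data)
{
	struct msi_irq irq = {
		.dest_id = (address_lo >> 12) & 0xff,
		.vector  = data & 0xff,
	};
	return irq;
}
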
e                  46 arch/x86/kvm/mmu.h static inline u64 rsvd_bits(int s, int e)
e                  48 arch/x86/kvm/mmu.h 	if (e < s)
e                  51 arch/x86/kvm/mmu.h 	return ((1ULL << (e - s + 1)) - 1) << s;
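
rsvd_bits(s, e) builds an inclusive mask of bits s..e: ((1ULL << (e - s + 1)) - 1) << s, with an empty mask for e < s. A self-contained check of a few values, restating the helper from the lines above:

#include <assert.h>
#include <stdint.h>

static inline uint64_t rsvd_bits(int s, int e)	/* restated from above */
{
	if (e < s)
		return 0;
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	assert(rsvd_bits(8, 11) == 0xf00ULL);	/* bits 8..11 inclusive */
	assert(rsvd_bits(0, 0)  == 0x1ULL);	/* single-bit range */
	assert(rsvd_bits(11, 8) == 0);		/* e < s: empty mask */
	return 0;
}
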
e                5287 arch/x86/kvm/svm.c get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
e                5293 arch/x86/kvm/svm.c 	kvm_set_msi_irq(kvm, e, &irq);
e                5323 arch/x86/kvm/svm.c 	struct kvm_kernel_irq_routing_entry *e;
e                5338 arch/x86/kvm/svm.c 	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
e                5342 arch/x86/kvm/svm.c 		if (e->type != KVM_IRQ_ROUTING_MSI)
e                5352 arch/x86/kvm/svm.c 		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
e                5404 arch/x86/kvm/svm.c 						 e->gsi, vcpu_info.vector,
e                 846 arch/x86/kvm/vmx/nested.c 				       struct vmx_msr_entry *e)
e                 849 arch/x86/kvm/vmx/nested.c 	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
e                 851 arch/x86/kvm/vmx/nested.c 	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
e                 852 arch/x86/kvm/vmx/nested.c 	    CC(e->index == MSR_IA32_UCODE_REV))
e                 854 arch/x86/kvm/vmx/nested.c 	if (CC(e->reserved != 0))
e                 860 arch/x86/kvm/vmx/nested.c 				     struct vmx_msr_entry *e)
e                 862 arch/x86/kvm/vmx/nested.c 	if (CC(e->index == MSR_FS_BASE) ||
e                 863 arch/x86/kvm/vmx/nested.c 	    CC(e->index == MSR_GS_BASE) ||
e                 864 arch/x86/kvm/vmx/nested.c 	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
e                 865 arch/x86/kvm/vmx/nested.c 	    nested_vmx_msr_check_common(vcpu, e))
e                 871 arch/x86/kvm/vmx/nested.c 				      struct vmx_msr_entry *e)
e                 873 arch/x86/kvm/vmx/nested.c 	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
e                 874 arch/x86/kvm/vmx/nested.c 	    nested_vmx_msr_check_common(vcpu, e))
e                 900 arch/x86/kvm/vmx/nested.c 	struct vmx_msr_entry e;
e                 907 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
e                 908 arch/x86/kvm/vmx/nested.c 					&e, sizeof(e))) {
e                 911 arch/x86/kvm/vmx/nested.c 				__func__, i, gpa + i * sizeof(e));
e                 914 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_load_msr_check(vcpu, &e)) {
e                 917 arch/x86/kvm/vmx/nested.c 				__func__, i, e.index, e.reserved);
e                 920 arch/x86/kvm/vmx/nested.c 		if (kvm_set_msr(vcpu, e.index, e.value)) {
e                 923 arch/x86/kvm/vmx/nested.c 				__func__, i, e.index, e.value);
e                 936 arch/x86/kvm/vmx/nested.c 	struct vmx_msr_entry e;
e                 944 arch/x86/kvm/vmx/nested.c 					gpa + i * sizeof(e),
e                 945 arch/x86/kvm/vmx/nested.c 					&e, 2 * sizeof(u32))) {
e                 948 arch/x86/kvm/vmx/nested.c 				__func__, i, gpa + i * sizeof(e));
e                 951 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_store_msr_check(vcpu, &e)) {
e                 954 arch/x86/kvm/vmx/nested.c 				__func__, i, e.index, e.reserved);
e                 957 arch/x86/kvm/vmx/nested.c 		if (kvm_get_msr(vcpu, e.index, &data)) {
e                 960 arch/x86/kvm/vmx/nested.c 				__func__, i, e.index);
e                 964 arch/x86/kvm/vmx/nested.c 					 gpa + i * sizeof(e) +
e                 969 arch/x86/kvm/vmx/nested.c 				__func__, i, e.index, data);
e                4334 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4341 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
e                4342 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
e                4610 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4653 arch/x86/kvm/vmx/nested.c 		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) {
e                4654 arch/x86/kvm/vmx/nested.c 			kvm_inject_page_fault(vcpu, &e);
e                4702 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4727 arch/x86/kvm/vmx/nested.c 		if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
e                4728 arch/x86/kvm/vmx/nested.c 			kvm_inject_page_fault(vcpu, &e);
e                4879 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4893 arch/x86/kvm/vmx/nested.c 					sizeof(gpa_t), &e)) {
e                4894 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
e                4907 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4937 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
e                4938 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
e                4964 arch/x86/kvm/vmx/nested.c 	struct x86_exception e;
e                4997 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
e                4998 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
e                5390 arch/x86/kvm/vmx/vmx.c 	struct x86_exception e;
e                5419 arch/x86/kvm/vmx/vmx.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
e                5420 arch/x86/kvm/vmx/vmx.c 		kvm_inject_page_fault(vcpu, &e);
e                7461 arch/x86/kvm/vmx/vmx.c 	struct kvm_kernel_irq_routing_entry *e;
e                7482 arch/x86/kvm/vmx/vmx.c 	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
e                7483 arch/x86/kvm/vmx/vmx.c 		if (e->type != KVM_IRQ_ROUTING_MSI)
e                7501 arch/x86/kvm/vmx/vmx.c 		kvm_set_msi_irq(kvm, e, &irq);
e                7522 arch/x86/kvm/vmx/vmx.c 		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
e                5551 arch/x86/kvm/x86.c 	struct x86_exception e;
e                5555 arch/x86/kvm/x86.c 				sig, sizeof(sig), &e) == 0 &&
e                 369 arch/x86/math-emu/fpu_trig.c 		long e;
e                 378 arch/x86/math-emu/fpu_trig.c 		e = exponent16(st_new_ptr);
e                 379 arch/x86/math-emu/fpu_trig.c 		convert_l2reg(&e, 1);
e                1026 arch/x86/math-emu/fpu_trig.c 	int e, tag;
e                1044 arch/x86/math-emu/fpu_trig.c 				e = exponent16(st0_ptr);
e                1045 arch/x86/math-emu/fpu_trig.c 				if (e >= 0) {
e                1046 arch/x86/math-emu/fpu_trig.c 					exponent.sigh = e;
e                1049 arch/x86/math-emu/fpu_trig.c 					exponent.sigh = -e;
e                  20 arch/x86/math-emu/reg_constant.c #define MAKE_REG(s, e, l, h) { l, h, \
e                  21 arch/x86/math-emu/reg_constant.c 		(u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
e                 431 arch/x86/mm/dump_pagetables.c #define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
e                 466 arch/x86/mm/dump_pagetables.c #define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
e                 191 arch/x86/mm/extable.c 	const struct exception_table_entry *e;
e                 194 arch/x86/mm/extable.c 	e = search_exception_tables(ip);
e                 195 arch/x86/mm/extable.c 	if (!e)
e                 197 arch/x86/mm/extable.c 	handler = ex_fixup_handler(e);
e                 205 arch/x86/mm/extable.c 	const struct exception_table_entry *e;
e                 222 arch/x86/mm/extable.c 	e = search_exception_tables(regs->ip);
e                 223 arch/x86/mm/extable.c 	if (!e)
e                 226 arch/x86/mm/extable.c 	handler = ex_fixup_handler(e);
e                 227 arch/x86/mm/extable.c 	return handler(e, regs, trapnr, error_code, fault_addr);
e                 444 arch/x86/mm/numa.c 		u64 e = mi->blk[i].end >> PAGE_SHIFT;
e                 445 arch/x86/mm/numa.c 		numaram += e - s;
e                 446 arch/x86/mm/numa.c 		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
e                 124 arch/x86/pci/irq.c 	struct irq_info *e;
e                 128 arch/x86/pci/irq.c 		e = &rt->slots[i];
e                 132 arch/x86/pci/irq.c 			DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
e                 134 arch/x86/pci/irq.c 				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
e                 138 arch/x86/pci/irq.c 		busmap[e->bus] = 1;
e                 202 arch/x86/platform/efi/efi.c 	struct efi_info *e = &boot_params.efi_info;
e                 212 arch/x86/platform/efi/efi.c 	if (e->efi_memmap_hi) {
e                 216 arch/x86/platform/efi/efi.c 	pmap =  e->efi_memmap;
e                 218 arch/x86/platform/efi/efi.c 	pmap = (e->efi_memmap |	((__u64)e->efi_memmap_hi << 32));
e                 221 arch/x86/platform/efi/efi.c 	data.size 		= e->efi_memmap_size;
e                 222 arch/x86/platform/efi/efi.c 	data.desc_size		= e->efi_memdesc_size;
e                 223 arch/x86/platform/efi/efi.c 	data.desc_version	= e->efi_memdesc_version;
e                1565 arch/x86/platform/uv/tlb_uv.c 	int e = ARRAY_SIZE(tunables);
e                1575 arch/x86/platform/uv/tlb_uv.c 	if (cnt != e) {
e                1576 arch/x86/platform/uv/tlb_uv.c 		pr_info("bau tunable error: should be %d values\n", e);
e                 225 arch/xtensa/include/asm/pgtable.h #define pte_ERROR(e) \
e                 226 arch/xtensa/include/asm/pgtable.h 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
e                 227 arch/xtensa/include/asm/pgtable.h #define pgd_ERROR(e) \
e                 228 arch/xtensa/include/asm/pgtable.h 	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
e                  74 arch/xtensa/kernel/jump_label.c void arch_jump_label_transform(struct jump_entry *e,
e                  77 arch/xtensa/kernel/jump_label.c 	u32 d = (jump_entry_target(e) - (jump_entry_code(e) + 4));
e                  94 arch/xtensa/kernel/jump_label.c 	patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE);
e                  30 arch/xtensa/mm/tlb.c 			int e = w + (i << PAGE_SHIFT);
e                  31 arch/xtensa/mm/tlb.c 			invalidate_itlb_entry_no_isync(e);
e                  43 arch/xtensa/mm/tlb.c 			int e = w + (i << PAGE_SHIFT);
e                  44 arch/xtensa/mm/tlb.c 			invalidate_dtlb_entry_no_isync(e);
e                 214 arch/xtensa/mm/tlb.c static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
e                 216 arch/xtensa/mm/tlb.c 	unsigned tlbidx = w | (e << PAGE_SHIFT);
e                 221 arch/xtensa/mm/tlb.c 	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
e                 230 arch/xtensa/mm/tlb.c 				dtlb ? 'D' : 'I', w, e, vpn,
e                 238 arch/xtensa/mm/tlb.c 					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
e                 259 arch/xtensa/mm/tlb.c 	unsigned w, e;
e                 264 arch/xtensa/mm/tlb.c 		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
e                 265 arch/xtensa/mm/tlb.c 			bug |= check_tlb_entry(w, e, true);
e                 267 arch/xtensa/mm/tlb.c 		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
e                 268 arch/xtensa/mm/tlb.c 			bug |= check_tlb_entry(w, e, false);
e                 208 block/badblocks.c 		sector_t e = a + BB_LEN(p[lo]);
e                 211 block/badblocks.c 		if (e >= s) {
e                 213 block/badblocks.c 			if (s == a && s + sectors >= e)
e                 219 block/badblocks.c 			if (e < s + sectors)
e                 220 block/badblocks.c 				e = s + sectors;
e                 221 block/badblocks.c 			if (e - a <= BB_MAX_LEN) {
e                 222 block/badblocks.c 				p[lo] = BB_MAKE(a, e-a, ack);
e                 223 block/badblocks.c 				s = e;
e                 232 block/badblocks.c 			sectors = e - s;
e                 240 block/badblocks.c 		sector_t e = a + BB_LEN(p[hi]);
e                 245 block/badblocks.c 			if (e <= s + sectors) {
e                 247 block/badblocks.c 				e = s + sectors;
e                 253 block/badblocks.c 			if (e - a <= BB_MAX_LEN) {
e                 254 block/badblocks.c 				p[hi] = BB_MAKE(a, e-a, ack);
e                 255 block/badblocks.c 				s = e;
e                 260 block/badblocks.c 			sectors = e - s;
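
The badblocks.c lines grow an existing bad range toward a new request [s, s+sectors) and clamp each stored extent to BB_MAX_LEN, carrying any remainder into the next iteration. A heavily reduced sketch of that extend-and-clamp step (the constant's value here is hypothetical):

#include <stdint.h>

#define BB_MAX_LEN 512	/* hypothetical cap on one stored extent */

/* Hedged sketch: extend [*a, *end) to cover [s, s+sectors), clamp to
 * BB_MAX_LEN, and return how many sectors remain uncovered. */
static uint64_t extend_range(uint64_t *a, uint64_t *end,
			     uint64_t s, uint64_t sectors)
{
	if (s + sectors > *end)
		*end = s + sectors;
	if (*end - *a > BB_MAX_LEN)
		*end = *a + BB_MAX_LEN;
	return s + sectors > *end ? s + sectors - *end : 0;
}
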
e                6378 block/bfq-iosched.c static void bfq_exit_queue(struct elevator_queue *e)
e                6380 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;
e                6423 block/bfq-iosched.c static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
e                6428 block/bfq-iosched.c 	eq = elevator_alloc(q, e);
e                6585 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
e                6587 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;			\
e                6607 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
e                6609 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;			\
e                6619 block/bfq-iosched.c __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
e                6621 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;			\
e                6651 block/bfq-iosched.c static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
e                6653 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;			\
e                6671 block/bfq-iosched.c static ssize_t bfq_max_budget_store(struct elevator_queue *e,
e                6674 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;
e                6699 block/bfq-iosched.c static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
e                6702 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;
e                6722 block/bfq-iosched.c static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
e                6725 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;
e                6744 block/bfq-iosched.c static ssize_t bfq_low_latency_store(struct elevator_queue *e,
e                6747 block/bfq-iosched.c 	struct bfq_data *bfqd = e->elevator_data;
e                 919 block/blk-mq-debugfs.c 	struct elevator_type *e = q->elevator->type;
e                 928 block/blk-mq-debugfs.c 	if (!e->queue_debugfs_attrs)
e                 933 block/blk-mq-debugfs.c 	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
e                 975 block/blk-mq-debugfs.c 	struct elevator_type *e = q->elevator->type;
e                 977 block/blk-mq-debugfs.c 	if (!e->hctx_debugfs_attrs)
e                 983 block/blk-mq-debugfs.c 			     e->hctx_debugfs_attrs);
e                  91 block/blk-mq-sched.c 	struct elevator_queue *e = q->elevator;
e                  97 block/blk-mq-sched.c 		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
e                 103 block/blk-mq-sched.c 		rq = e->type->ops.dispatch_request(hctx);
e                 173 block/blk-mq-sched.c 	struct elevator_queue *e = q->elevator;
e                 174 block/blk-mq-sched.c 	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
e                 327 block/blk-mq-sched.c 	struct elevator_queue *e = q->elevator;
e                 333 block/blk-mq-sched.c 	if (e && e->type->ops.bio_merge)
e                 334 block/blk-mq-sched.c 		return e->type->ops.bio_merge(hctx, bio, nr_segs);
e                 388 block/blk-mq-sched.c 	struct elevator_queue *e = q->elevator;
e                 398 block/blk-mq-sched.c 	WARN_ON(e && (rq->tag != -1));
e                 400 block/blk-mq-sched.c 	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
e                 427 block/blk-mq-sched.c 	if (e && e->type->ops.insert_requests) {
e                 431 block/blk-mq-sched.c 		e->type->ops.insert_requests(hctx, &list, at_head);
e                 447 block/blk-mq-sched.c 	struct elevator_queue *e;
e                 457 block/blk-mq-sched.c 	e = hctx->queue->elevator;
e                 458 block/blk-mq-sched.c 	if (e && e->type->ops.insert_requests)
e                 459 block/blk-mq-sched.c 		e->type->ops.insert_requests(hctx, list, false);
e                 466 block/blk-mq-sched.c 		if (!hctx->dispatch_busy && !e && !run_queue_async) {
e                 523 block/blk-mq-sched.c int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
e                 530 block/blk-mq-sched.c 	if (!e) {
e                 550 block/blk-mq-sched.c 	ret = e->ops.init_sched(q, e);
e                 557 block/blk-mq-sched.c 		if (e->ops.init_hctx) {
e                 558 block/blk-mq-sched.c 			ret = e->ops.init_hctx(hctx, i);
e                 594 block/blk-mq-sched.c void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
e                 601 block/blk-mq-sched.c 		if (e->type->ops.exit_hctx && hctx->sched_data) {
e                 602 block/blk-mq-sched.c 			e->type->ops.exit_hctx(hctx, i);
e                 607 block/blk-mq-sched.c 	if (e->type->ops.exit_sched)
e                 608 block/blk-mq-sched.c 		e->type->ops.exit_sched(e);
e                  30 block/blk-mq-sched.h int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
e                  31 block/blk-mq-sched.h void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
e                  48 block/blk-mq-sched.h 	struct elevator_queue *e = q->elevator;
e                  50 block/blk-mq-sched.h 	if (e && e->type->ops.allow_merge)
e                  51 block/blk-mq-sched.h 		return e->type->ops.allow_merge(q, rq, bio);
e                  58 block/blk-mq-sched.h 	struct elevator_queue *e = rq->q->elevator;
e                  60 block/blk-mq-sched.h 	if (e && e->type->ops.completed_request)
e                  61 block/blk-mq-sched.h 		e->type->ops.completed_request(rq, now);
e                  67 block/blk-mq-sched.h 	struct elevator_queue *e = q->elevator;
e                  69 block/blk-mq-sched.h 	if (e && e->type->ops.requeue_request)
e                  70 block/blk-mq-sched.h 		e->type->ops.requeue_request(rq);
e                  75 block/blk-mq-sched.h 	struct elevator_queue *e = hctx->queue->elevator;
e                  77 block/blk-mq-sched.h 	if (e && e->type->ops.has_work)
e                  78 block/blk-mq-sched.h 		return e->type->ops.has_work(hctx);
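
blk-mq-sched.h repeats one guard shape: the queue may have no elevator, and each scheduler op is optional, so every call site checks both the elevator pointer and the op pointer before dispatching. A generic sketch of the pattern (types hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct sched_ops {
	bool (*has_work)(void *hctx);	/* optional hook, may be NULL */
};

struct elevator {
	const struct sched_ops *ops;
};

/* Hedged sketch of the `if (e && e->type->ops.x)` idiom above. */
static bool sched_has_work(struct elevator *e, void *hctx)
{
	if (e && e->ops->has_work)
		return e->ops->has_work(hctx);
	return false;
}
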
e                 360 block/blk-mq.c 	struct elevator_queue *e = q->elevator;
e                 383 block/blk-mq.c 	if (e) {
e                 392 block/blk-mq.c 		    e->type->ops.limit_depth &&
e                 394 block/blk-mq.c 			e->type->ops.limit_depth(data->cmd_flags, data);
e                 410 block/blk-mq.c 		if (e && e->type->ops.prepare_request) {
e                 411 block/blk-mq.c 			if (e->type->icq_cache)
e                 414 block/blk-mq.c 			e->type->ops.prepare_request(rq, bio);
e                 512 block/blk-mq.c 	struct elevator_queue *e = q->elevator;
e                 517 block/blk-mq.c 		if (e && e->type->ops.finish_request)
e                 518 block/blk-mq.c 			e->type->ops.finish_request(rq);
e                 207 block/blk.h    		struct elevator_queue *e)
e                 212 block/blk.h    	__elevator_exit(q, e);
e                  63 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                  65 block/elevator.c 	if (e->type->ops.allow_merge)
e                  66 block/elevator.c 		return e->type->ops.allow_merge(q, rq, bio);
e                 101 block/elevator.c static bool elevator_match(const struct elevator_type *e, const char *name,
e                 104 block/elevator.c 	if (!elv_support_features(e->elevator_features, required_features))
e                 106 block/elevator.c 	if (!strcmp(e->elevator_name, name))
e                 108 block/elevator.c 	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
e                 125 block/elevator.c 	struct elevator_type *e;
e                 127 block/elevator.c 	list_for_each_entry(e, &elv_list, list) {
e                 128 block/elevator.c 		if (elevator_match(e, name, required_features))
e                 129 block/elevator.c 			return e;
e                 135 block/elevator.c static void elevator_put(struct elevator_type *e)
e                 137 block/elevator.c 	module_put(e->elevator_owner);
e                 143 block/elevator.c 	struct elevator_type *e;
e                 147 block/elevator.c 	e = elevator_find(name, q->required_elevator_features);
e                 148 block/elevator.c 	if (!e && try_loading) {
e                 152 block/elevator.c 		e = elevator_find(name, q->required_elevator_features);
e                 155 block/elevator.c 	if (e && !try_module_get(e->elevator_owner))
e                 156 block/elevator.c 		e = NULL;
e                 159 block/elevator.c 	return e;
e                 165 block/elevator.c 				  struct elevator_type *e)
e                 173 block/elevator.c 	eq->type = e;
e                 184 block/elevator.c 	struct elevator_queue *e;
e                 186 block/elevator.c 	e = container_of(kobj, struct elevator_queue, kobj);
e                 187 block/elevator.c 	elevator_put(e->type);
e                 188 block/elevator.c 	kfree(e);
e                 191 block/elevator.c void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
e                 193 block/elevator.c 	mutex_lock(&e->sysfs_lock);
e                 194 block/elevator.c 	if (e->type->ops.exit_sched)
e                 195 block/elevator.c 		blk_mq_exit_sched(q, e);
e                 196 block/elevator.c 	mutex_unlock(&e->sysfs_lock);
e                 198 block/elevator.c 	kobject_put(&e->kobj);
e                 216 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 219 block/elevator.c 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
e                 232 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 236 block/elevator.c 	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
e                 307 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 343 block/elevator.c 	if (e->type->ops.request_merge)
e                 344 block/elevator.c 		return e->type->ops.request_merge(q, req, bio);
e                 393 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 395 block/elevator.c 	if (e->type->ops.request_merged)
e                 396 block/elevator.c 		e->type->ops.request_merged(q, rq, type);
e                 407 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 409 block/elevator.c 	if (e->type->ops.requests_merged)
e                 410 block/elevator.c 		e->type->ops.requests_merged(q, rq, next);
e                 418 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 420 block/elevator.c 	if (e->type->ops.next_request)
e                 421 block/elevator.c 		return e->type->ops.next_request(q, rq);
e                 428 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 430 block/elevator.c 	if (e->type->ops.former_request)
e                 431 block/elevator.c 		return e->type->ops.former_request(q, rq);
e                 442 block/elevator.c 	struct elevator_queue *e;
e                 448 block/elevator.c 	e = container_of(kobj, struct elevator_queue, kobj);
e                 449 block/elevator.c 	mutex_lock(&e->sysfs_lock);
e                 450 block/elevator.c 	error = e->type ? entry->show(e, page) : -ENOENT;
e                 451 block/elevator.c 	mutex_unlock(&e->sysfs_lock);
e                 460 block/elevator.c 	struct elevator_queue *e;
e                 466 block/elevator.c 	e = container_of(kobj, struct elevator_queue, kobj);
e                 467 block/elevator.c 	mutex_lock(&e->sysfs_lock);
e                 468 block/elevator.c 	error = e->type ? entry->store(e, page, length) : -ENOENT;
e                 469 block/elevator.c 	mutex_unlock(&e->sysfs_lock);
e                 490 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 493 block/elevator.c 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
e                 495 block/elevator.c 		struct elv_fs_entry *attr = e->type->elevator_attrs;
e                 498 block/elevator.c 				if (sysfs_create_file(&e->kobj, &attr->attr))
e                 504 block/elevator.c 			kobject_uevent(&e->kobj, KOBJ_ADD);
e                 506 block/elevator.c 		e->registered = 1;
e                 519 block/elevator.c 		struct elevator_queue *e = q->elevator;
e                 521 block/elevator.c 		kobject_uevent(&e->kobj, KOBJ_REMOVE);
e                 522 block/elevator.c 		kobject_del(&e->kobj);
e                 524 block/elevator.c 		e->registered = 0;
e                 530 block/elevator.c int elv_register(struct elevator_type *e)
e                 533 block/elevator.c 	if (e->icq_size) {
e                 534 block/elevator.c 		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
e                 535 block/elevator.c 		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
e                 538 block/elevator.c 		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
e                 539 block/elevator.c 			 "%s_io_cq", e->elevator_name);
e                 540 block/elevator.c 		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
e                 541 block/elevator.c 						 e->icq_align, 0, NULL);
e                 542 block/elevator.c 		if (!e->icq_cache)
e                 548 block/elevator.c 	if (elevator_find(e->elevator_name, 0)) {
e                 550 block/elevator.c 		kmem_cache_destroy(e->icq_cache);
e                 553 block/elevator.c 	list_add_tail(&e->list, &elv_list);
e                 556 block/elevator.c 	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
e                 562 block/elevator.c void elv_unregister(struct elevator_type *e)
e                 566 block/elevator.c 	list_del_init(&e->list);
e                 573 block/elevator.c 	if (e->icq_cache) {
e                 575 block/elevator.c 		kmem_cache_destroy(e->icq_cache);
e                 576 block/elevator.c 		e->icq_cache = NULL;
e                 643 block/elevator.c 	struct elevator_type *e, *found = NULL;
e                 647 block/elevator.c 	list_for_each_entry(e, &elv_list, list) {
e                 648 block/elevator.c 		if (elv_support_features(e->elevator_features,
e                 650 block/elevator.c 			found = e;
e                 670 block/elevator.c 	struct elevator_type *e;
e                 682 block/elevator.c 		e = elevator_get_default(q);
e                 684 block/elevator.c 		e = elevator_get_by_features(q);
e                 685 block/elevator.c 	if (!e)
e                 691 block/elevator.c 	err = blk_mq_init_sched(q, e);
e                 698 block/elevator.c 			"falling back to \"none\"\n", e->elevator_name);
e                 699 block/elevator.c 		elevator_put(e);
e                 733 block/elevator.c 	struct elevator_type *e;
e                 749 block/elevator.c 	e = elevator_get(q, strstrip(elevator_name), true);
e                 750 block/elevator.c 	if (!e)
e                 755 block/elevator.c 		elevator_put(e);
e                 759 block/elevator.c 	return elevator_switch(q, e);
e                 779 block/elevator.c 	struct elevator_queue *e = q->elevator;
e                 790 block/elevator.c 		elv = e->type;
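
elevator.c's lookup path pairs elevator_find() with try_module_get(): a type is only returned once its owner module is pinned, and elevator_put() drops the pin, so a scheduler module cannot unload while a queue is using it. A user-space sketch modelling the pin with an atomic count (names and list shape hypothetical):

#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

struct elv_type {
	const char *name;
	atomic_int pins;	/* stands in for the module refcount */
	struct elv_type *next;
};

/* Hedged sketch of elevator_get()/elevator_put(): a successful
 * lookup pins the entry; the caller must unpin when done. */
static struct elv_type *elv_get(struct elv_type *list, const char *name)
{
	struct elv_type *e;

	for (e = list; e; e = e->next)
		if (!strcmp(e->name, name)) {
			atomic_fetch_add(&e->pins, 1);
			return e;
		}
	return NULL;
}

static void elv_put(struct elv_type *e)
{
	atomic_fetch_sub(&e->pins, 1);
}
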
e                 416 block/kyber-iosched.c static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
e                 421 block/kyber-iosched.c 	eq = elevator_alloc(q, e);
e                 439 block/kyber-iosched.c static void kyber_exit_sched(struct elevator_queue *e)
e                 441 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;
e                 860 block/kyber-iosched.c static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
e                 863 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;		\
e                 868 block/kyber-iosched.c static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
e                 871 block/kyber-iosched.c 	struct kyber_queue_data *kqd = e->elevator_data;		\
e                 393 block/mq-deadline.c static void dd_exit_queue(struct elevator_queue *e)
e                 395 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;
e                 406 block/mq-deadline.c static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
e                 411 block/mq-deadline.c 	eq = elevator_alloc(q, e);
e                 605 block/mq-deadline.c static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
e                 607 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;			\
e                 621 block/mq-deadline.c static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
e                 623 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;			\
e                 360 crypto/asymmetric_keys/asym_tpm.c 	uint8_t e[3] = { 0x01, 0x00, 0x01 };
e                 369 crypto/asymmetric_keys/asym_tpm.c 	cur = encode_tag_length(cur, 0x02, sizeof(e));
e                 370 crypto/asymmetric_keys/asym_tpm.c 	memcpy(cur, e, sizeof(e));
e                 371 crypto/asymmetric_keys/asym_tpm.c 	cur += sizeof(e);
e                  80 crypto/ecrdsa.c 	u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
e                  81 crypto/ecrdsa.c 	u64 *v = e;		  /* e^{-1} \mod q */
e                  84 crypto/ecrdsa.c 	struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
e                 122 crypto/ecrdsa.c 	vli_from_le64(e, digest, ndigits);
e                 123 crypto/ecrdsa.c 	if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
e                 124 crypto/ecrdsa.c 		vli_sub(e, e, ctx->curve->n, ndigits);
e                 125 crypto/ecrdsa.c 	if (vli_is_zero(e, ndigits))
e                 126 crypto/ecrdsa.c 		e[0] = 1;
e                 129 crypto/ecrdsa.c 	vli_mod_inv(v, e, ctx->curve->n, ndigits);
e                  43 crypto/rmd160.c #define ROUND(a, b, c, d, e, f, k, x, s)  { \
e                  45 crypto/rmd160.c 	(a) = rol32((a), (s)) + (e); \
e                  43 crypto/rmd320.c #define ROUND(a, b, c, d, e, f, k, x, s)  { \
e                  45 crypto/rmd320.c 	(a) = rol32((a), (s)) + (e); \
e                  17 crypto/rsa.c   	MPI e;
e                  32 crypto/rsa.c   	return mpi_powm(c, m, key->e, key->n);
e                  65 crypto/rsa.c   	if (unlikely(!pkey->n || !pkey->e)) {
e                 134 crypto/rsa.c   	mpi_free(key->e);
e                 137 crypto/rsa.c   	key->e = NULL;
e                 170 crypto/rsa.c   	mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
e                 171 crypto/rsa.c   	if (!mpi_key->e)
e                 208 crypto/rsa.c   	mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
e                 209 crypto/rsa.c   	if (!mpi_key->e)
e                  55 crypto/rsa_helper.c 	key->e = value;
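
rsa.c's mpi_powm() call is textbook RSA: encryption computes c = m^e mod n and decryption m = c^d mod n over arbitrary-precision MPIs. With the classic toy key n = 3233, e = 17, d = 2753, the message m = 65 encrypts to 2790. A square-and-multiply sketch small enough to check those numbers (the kernel itself never uses fixed-width arithmetic here):

#include <assert.h>
#include <stdint.h>

/* Hedged sketch: fixed-width modular exponentiation, using the
 * GCC/Clang __uint128_t extension to avoid overflow. */
static uint64_t modpow(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t r = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			r = (__uint128_t)r * base % mod;
		base = (__uint128_t)base * base % mod;
		exp >>= 1;
	}
	return r;
}

int main(void)
{
	assert(modpow(65, 17, 3233) == 2790);	/* c = m^e mod n */
	assert(modpow(2790, 2753, 3233) == 65);	/* m = c^d mod n */
	return 0;
}
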
e                 101 crypto/sha512_generic.c 	u64 a, b, c, d, e, f, g, h, t1, t2;
e                 108 crypto/sha512_generic.c 	e=state[4];   f=state[5];   g=state[6];   h=state[7];
e                 126 crypto/sha512_generic.c 		t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[(i & 15)];
e                 128 crypto/sha512_generic.c 		t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
e                 130 crypto/sha512_generic.c 		t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
e                 132 crypto/sha512_generic.c 		t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
e                 133 crypto/sha512_generic.c 		t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
e                 135 crypto/sha512_generic.c 		t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
e                 137 crypto/sha512_generic.c 		t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
e                 139 crypto/sha512_generic.c 		t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
e                 141 crypto/sha512_generic.c 		t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
e                 145 crypto/sha512_generic.c 	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
e                 148 crypto/sha512_generic.c 	a = b = c = d = e = f = g = h = t1 = t2 = 0;
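
The sha512_generic.c round above rotates a..h through one t1/t2 pair per step; its Ch and Maj helpers are the FIPS 180-4 boolean functions, shown here in one common reduced form:

#include <stdint.h>

/* FIPS 180-4: Ch selects y or z by each bit of x; Maj is a per-bit
 * majority vote. These forms are algebraically equivalent to
 * (x & y) ^ (~x & z) and (x & y) ^ (x & z) ^ (y & z). */
static inline uint64_t Ch(uint64_t x, uint64_t y, uint64_t z)
{
	return z ^ (x & (y ^ z));
}

static inline uint64_t Maj(uint64_t x, uint64_t y, uint64_t z)
{
	return (x & y) | (z & (x | y));
}
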
e                  44 crypto/sm3_generic.c static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
e                  46 crypto/sm3_generic.c 	return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
e                  78 crypto/sm3_generic.c 	u32 a, b, c, d, e, f, g, h;
e                  85 crypto/sm3_generic.c 	e = m[4];
e                  92 crypto/sm3_generic.c 		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
e                  99 crypto/sm3_generic.c 		tt2 = gg(i, e, f, g) + h + ss1 + *w;
e                 108 crypto/sm3_generic.c 		f = e;
e                 109 crypto/sm3_generic.c 		e = p0(tt2);
e                 116 crypto/sm3_generic.c 	m[4] = e ^ m[4];
e                 121 crypto/sm3_generic.c 	a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
e                 259 crypto/tcrypt.c 	const char *e;
e                 276 crypto/tcrypt.c 		e = "encryption";
e                 278 crypto/tcrypt.c 		e = "decryption";
e                 333 crypto/tcrypt.c 		get_driver_name(crypto_aead, tfm), e);
e                 424 crypto/tcrypt.c 				pr_err("%s() failed return code=%d\n", e, ret);
e                 530 crypto/tcrypt.c 	const char *e;
e                 550 crypto/tcrypt.c 		e = "encryption";
e                 552 crypto/tcrypt.c 		e = "decryption";
e                 576 crypto/tcrypt.c 			get_driver_name(crypto_aead, tfm), e);
e                 670 crypto/tcrypt.c 				pr_err("%s() failed return code=%d\n", e, ret);
e                1256 crypto/tcrypt.c 	const char *e;
e                1262 crypto/tcrypt.c 		e = "encryption";
e                1264 crypto/tcrypt.c 		e = "decryption";
e                1312 crypto/tcrypt.c 		get_driver_name(crypto_skcipher, tfm), e);
e                1388 crypto/tcrypt.c 				pr_err("%s() failed flags=%x\n", e,
e                1499 crypto/tcrypt.c 	const char *e;
e                1503 crypto/tcrypt.c 		e = "encryption";
e                1505 crypto/tcrypt.c 		e = "decryption";
e                1518 crypto/tcrypt.c 			get_driver_name(crypto_skcipher, tfm), e);
e                1603 crypto/tcrypt.c 				pr_err("%s() failed flags=%x\n", e,
e                2357 crypto/testmgr.c 	const char *e;
e                2367 crypto/testmgr.c 	        e = "encryption";
e                2369 crypto/testmgr.c 		e = "decryption";
e                2422 crypto/testmgr.c 			       "on %s for %s\n", j, e, algo);
e                 402 drivers/acpi/acpica/acmacros.h #define ARGI_LIST5(a, b, c, d, e)       (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
e                 403 drivers/acpi/acpica/acmacros.h #define ARGI_LIST6(a, b, c, d, e, f)    (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))
e                 409 drivers/acpi/acpica/acmacros.h #define ARGP_LIST5(a, b, c, d, e)       (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
e                 410 drivers/acpi/acpica/acmacros.h #define ARGP_LIST6(a, b, c, d, e, f)    (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))
e                 424 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_NAMESPACE(s, p, e)       acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
e                 425 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e)       acpi_ut_method_error (AE_INFO, s, n, p, e);
e                 435 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_NAMESPACE(s, p, e)
e                 436 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e)
e                 124 drivers/acpi/acpica/acpredef.h #define PACKAGE_INFO(a,b,c,d,e,f)       {{{(a),(b),(c),(d)}, ((((u16)(f)) << 8) | (e)), 0}}
e                 199 drivers/acpi/pci_mcfg.c 	struct mcfg_entry *e;
e                 209 drivers/acpi/pci_mcfg.c 	list_for_each_entry(e, &pci_mcfg_list, list) {
e                 210 drivers/acpi/pci_mcfg.c 		if (e->segment == seg && e->bus_start <= bus_res->start &&
e                 211 drivers/acpi/pci_mcfg.c 		    e->bus_end >= bus_res->end) {
e                 212 drivers/acpi/pci_mcfg.c 			root->mcfg_addr = e->addr;
e                 244 drivers/acpi/pci_mcfg.c 	struct mcfg_entry *e, *arr;
e                 259 drivers/acpi/pci_mcfg.c 	for (i = 0, e = arr; i < n; i++, mptr++, e++) {
e                 260 drivers/acpi/pci_mcfg.c 		e->segment = mptr->pci_segment;
e                 261 drivers/acpi/pci_mcfg.c 		e->addr =  mptr->address;
e                 262 drivers/acpi/pci_mcfg.c 		e->bus_start = mptr->start_bus_number;
e                 263 drivers/acpi/pci_mcfg.c 		e->bus_end = mptr->end_bus_number;
e                 264 drivers/acpi/pci_mcfg.c 		list_add(&e->list, &pci_mcfg_list);
e                 105 drivers/acpi/power.c 		struct acpi_power_resource_entry *e;
e                 107 drivers/acpi/power.c 		list_for_each_entry(e, list, node)
e                 108 drivers/acpi/power.c 			if (e->resource->order > resource->order) {
e                 109 drivers/acpi/power.c 				list_add_tail(&entry->node, &e->node);
e                 119 drivers/acpi/power.c 	struct acpi_power_resource_entry *entry, *e;
e                 121 drivers/acpi/power.c 	list_for_each_entry_safe(entry, e, list, node) {
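
acpi/power.c keeps power-resource lists sorted by inserting each new entry before the first existing entry with a larger order, falling back to the tail. The same ordered insert with plain pointers instead of list_head (types hypothetical):

#include <stddef.h>

struct res_entry {
	int order;
	struct res_entry *next;
};

/* Hedged sketch of the ordered insert above: stop at the first
 * element whose order exceeds the new entry's, insert before it. */
static void insert_ordered(struct res_entry **head, struct res_entry *n)
{
	struct res_entry **p = head;

	while (*p && (*p)->order <= n->order)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}
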
e                 203 drivers/android/binder.c 	struct binder_transaction_log_entry *e;
e                 208 drivers/android/binder.c 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
e                 209 drivers/android/binder.c 	WRITE_ONCE(e->debug_id_done, 0);
e                 216 drivers/android/binder.c 	memset(e, 0, sizeof(*e));
e                 217 drivers/android/binder.c 	return e;
e                2858 drivers/android/binder.c 	struct binder_transaction_log_entry *e;
e                2869 drivers/android/binder.c 	e = binder_transaction_log_add(&binder_transaction_log);
e                2870 drivers/android/binder.c 	e->debug_id = t_debug_id;
e                2871 drivers/android/binder.c 	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e                2872 drivers/android/binder.c 	e->from_proc = proc->pid;
e                2873 drivers/android/binder.c 	e->from_thread = thread->pid;
e                2874 drivers/android/binder.c 	e->target_handle = tr->target.handle;
e                2875 drivers/android/binder.c 	e->data_size = tr->data_size;
e                2876 drivers/android/binder.c 	e->offsets_size = tr->offsets_size;
e                2877 drivers/android/binder.c 	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
e                2986 drivers/android/binder.c 		e->to_node = target_node->debug_id;
e                3054 drivers/android/binder.c 		e->to_thread = target_thread->pid;
e                3055 drivers/android/binder.c 	e->to_proc = target_proc->pid;
e                3474 drivers/android/binder.c 	WRITE_ONCE(e->debug_id_done, t_debug_id);
e                3529 drivers/android/binder.c 		e->return_error = return_error;
e                3530 drivers/android/binder.c 		e->return_error_param = return_error_param;
e                3531 drivers/android/binder.c 		e->return_error_line = return_error_line;
e                3533 drivers/android/binder.c 		*fe = *e;
e                3539 drivers/android/binder.c 		WRITE_ONCE(e->debug_id_done, t_debug_id);
e                4265 drivers/android/binder.c 			struct binder_error *e = container_of(
e                4268 drivers/android/binder.c 			WARN_ON(e->cmd == BR_OK);
e                4270 drivers/android/binder.c 			if (put_user(e->cmd, (uint32_t __user *)ptr))
e                4272 drivers/android/binder.c 			cmd = e->cmd;
e                4273 drivers/android/binder.c 			e->cmd = BR_OK;
e                4597 drivers/android/binder.c 			struct binder_error *e = container_of(
e                4602 drivers/android/binder.c 				e->cmd);
e                5597 drivers/android/binder.c 		struct binder_error *e = container_of(
e                5601 drivers/android/binder.c 			   prefix, e->cmd);
e                6020 drivers/android/binder.c 					struct binder_transaction_log_entry *e)
e                6022 drivers/android/binder.c 	int debug_id = READ_ONCE(e->debug_id_done);
e                6030 drivers/android/binder.c 		   e->debug_id, (e->call_type == 2) ? "reply" :
e                6031 drivers/android/binder.c 		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
e                6032 drivers/android/binder.c 		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
e                6033 drivers/android/binder.c 		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
e                6034 drivers/android/binder.c 		   e->return_error, e->return_error_param,
e                6035 drivers/android/binder.c 		   e->return_error_line);
e                6041 drivers/android/binder.c 	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
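
The binder lines above implement a lock-free transaction log ring: binder_transaction_log_add() claims the next slot modulo the array size and clears debug_id_done before the entry is filled, and the writer stores the debug id back only once the record is complete, so a reader that sees the same non-zero id before and after copying knows its snapshot is consistent. A minimal sketch of that protocol, using C11 atomics in place of the kernel's READ_ONCE/WRITE_ONCE (names and payload are illustrative):

    #include <stdatomic.h>

    #define LOG_SIZE 32

    struct log_entry {
            _Atomic int debug_id_done;      /* 0 while the slot is in flight */
            int debug_id;
            /* ... payload fields ... */
    };

    struct tlog {
            _Atomic unsigned int cur;
            struct log_entry entry[LOG_SIZE];
    };

    /* Writer, step 1: claim a slot and mark it incomplete. */
    static struct log_entry *log_add(struct tlog *log)
    {
            unsigned int cur = atomic_fetch_add(&log->cur, 1);
            struct log_entry *e = &log->entry[cur % LOG_SIZE];

            atomic_store(&e->debug_id_done, 0);
            return e;
    }

    /* Writer, step 2: publish the id after all payload stores. */
    static void log_done(struct log_entry *e, int debug_id)
    {
            atomic_store(&e->debug_id_done, debug_id);
    }

    /* Reader: the copy is usable only if the id was non-zero and
     * unchanged across it, as binder's print path checks. */
    static int log_read(struct log_entry *e, struct log_entry *out)
    {
            int id = atomic_load(&e->debug_id_done);

            if (!id)
                    return -1;
            *out = *e;
            return atomic_load(&e->debug_id_done) == id ? 0 : -1;
    }
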
e                1536 drivers/atm/idt77252.c 	int e;
e                1560 drivers/atm/idt77252.c 		for (e = 0; e < card->tst_size - 2; e++) {
e                1561 drivers/atm/idt77252.c 			if (card->soft_tst[e].tste & TSTE_PUSH_IDLE) {
e                1562 drivers/atm/idt77252.c 				write_sram(card, idle + e,
e                1563 drivers/atm/idt77252.c 					   card->soft_tst[e].tste & TSTE_MASK);
e                1564 drivers/atm/idt77252.c 				card->soft_tst[e].tste &= ~(TSTE_PUSH_IDLE);
e                1571 drivers/atm/idt77252.c 		for (e = 0; e < card->tst_size - 2; e++) {
e                1572 drivers/atm/idt77252.c 			if (card->soft_tst[e].tste & TSTE_PUSH_ACTIVE) {
e                1573 drivers/atm/idt77252.c 				write_sram(card, idle + e,
e                1574 drivers/atm/idt77252.c 					   card->soft_tst[e].tste & TSTE_MASK);
e                1575 drivers/atm/idt77252.c 				card->soft_tst[e].tste &= ~(TSTE_PUSH_ACTIVE);
e                1576 drivers/atm/idt77252.c 				card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
e                1598 drivers/atm/idt77252.c 	int e, r;
e                1602 drivers/atm/idt77252.c 	for (e = 0; e < avail; e++) {
e                1603 drivers/atm/idt77252.c 		if (card->soft_tst[e].vc == NULL)
e                1606 drivers/atm/idt77252.c 	if (e >= avail) {
e                1612 drivers/atm/idt77252.c 		card->name, vc ? vc->index : -1, e);
e                1626 drivers/atm/idt77252.c 		if ((cl >= avail) && (card->soft_tst[e].vc == NULL)) {
e                1628 drivers/atm/idt77252.c 				card->soft_tst[e].vc = vc;
e                1630 drivers/atm/idt77252.c 				card->soft_tst[e].vc = (void *)-1;
e                1632 drivers/atm/idt77252.c 			card->soft_tst[e].tste = data;
e                1634 drivers/atm/idt77252.c 				card->soft_tst[e].tste |= TSTE_PUSH_ACTIVE;
e                1636 drivers/atm/idt77252.c 				write_sram(card, idle + e, data);
e                1637 drivers/atm/idt77252.c 				card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
e                1644 drivers/atm/idt77252.c 		if (++e == avail)
e                1645 drivers/atm/idt77252.c 			e = 0;
e                1674 drivers/atm/idt77252.c 	int e;
e                1678 drivers/atm/idt77252.c 	for (e = 0; e < card->tst_size - 2; e++) {
e                1679 drivers/atm/idt77252.c 		if (card->soft_tst[e].vc == vc) {
e                1680 drivers/atm/idt77252.c 			card->soft_tst[e].vc = NULL;
e                1682 drivers/atm/idt77252.c 			card->soft_tst[e].tste = TSTE_OPC_VAR;
e                1684 drivers/atm/idt77252.c 				card->soft_tst[e].tste |= TSTE_PUSH_ACTIVE;
e                1686 drivers/atm/idt77252.c 				write_sram(card, idle + e, TSTE_OPC_VAR);
e                1687 drivers/atm/idt77252.c 				card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
e                2049 drivers/atm/idt77252.c 	u16 m, e;
e                2053 drivers/atm/idt77252.c 	e = idt77252_fls(rate) - 1;
e                2054 drivers/atm/idt77252.c 	if (e < 9)
e                2055 drivers/atm/idt77252.c 		m = (rate - (1 << e)) << (9 - e);
e                2056 drivers/atm/idt77252.c 	else if (e == 9)
e                2057 drivers/atm/idt77252.c 		m = (rate - (1 << e));
e                2059 drivers/atm/idt77252.c 		m = (rate - (1 << e)) >> (e - 9);
e                2060 drivers/atm/idt77252.c 	return 0x4000 | (e << 9) | m;
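
The idt77252 fragment above packs an ATM cell rate into the chip's logarithmic format: bit 14 set, a 5-bit exponent e in bits 13..9 and a 9-bit mantissa m in bits 8..0, so the encoded value stands for roughly ((512 + m) << e) >> 9 cells per second. A standalone sketch of the encode step and the matching decode, with a plain loop standing in for the driver's idt77252_fls():

    #include <stdint.h>

    static unsigned int fls32(uint32_t v)       /* stand-in for idt77252_fls() */
    {
            unsigned int n = 0;

            while (v) {
                    n++;
                    v >>= 1;
            }
            return n;
    }

    static uint16_t encode_rate(uint32_t rate)  /* caller ensures rate >= 1 */
    {
            uint16_t m, e = fls32(rate) - 1;    /* position of the top set bit */

            if (e < 9)
                    m = (rate - (1u << e)) << (9 - e);
            else if (e == 9)
                    m = rate - (1u << e);
            else
                    m = (rate - (1u << e)) >> (e - 9);
            return 0x4000 | (e << 9) | m;
    }

    static uint32_t decode_rate(uint16_t v)     /* inverse, up to rounding */
    {
            uint32_t e = (v >> 9) & 0x1f, m = v & 0x1ff;

            return ((512 + m) << e) >> 9;       /* needs 64-bit math for e > 21 */
    }
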
e                 948 drivers/atm/lanai.c 	const u8 *e = lanai->eeprom;
e                 953 drivers/atm/lanai.c 		if (e[i] < 0x20 || e[i] > 0x7E)
e                 956 drivers/atm/lanai.c 	    i != EEPROM_COPYRIGHT + EEPROM_COPYRIGHT_LEN && e[i] == '\0')
e                 958 drivers/atm/lanai.c 		    (char *) &e[EEPROM_COPYRIGHT]);
e                 964 drivers/atm/lanai.c 		s += e[i];
e                 966 drivers/atm/lanai.c 	if (s != e[EEPROM_CHECKSUM]) {
e                 969 drivers/atm/lanai.c 		    (unsigned int) s, (unsigned int) e[EEPROM_CHECKSUM]);
e                 973 drivers/atm/lanai.c 	if (s != e[EEPROM_CHECKSUM_REV]) {
e                 976 drivers/atm/lanai.c 		    (unsigned int) s, (unsigned int) e[EEPROM_CHECKSUM_REV]);
e                 981 drivers/atm/lanai.c 		if ((e[EEPROM_MAC + i] ^ e[EEPROM_MAC_REV + i]) != 0xFF) {
e                 985 drivers/atm/lanai.c 			    (unsigned int) e[EEPROM_MAC + i],
e                 986 drivers/atm/lanai.c 			    (unsigned int) e[EEPROM_MAC_REV + i]);
e                 989 drivers/atm/lanai.c 	DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
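
The lanai EEPROM check above layers three cheap validations: the copyright string must be printable ASCII, a byte-wise sum over the leading bytes must match a stored checksum (twice, once for the "_REV" copy), and the MAC address is stored a second time bit-inverted, so each byte pair must XOR to 0xFF. A sketch of the checksum-plus-complement part, with made-up offsets in place of the real EEPROM_* layout:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative offsets only -- the real layout is in lanai.c. */
    #define MAC_OFF     0
    #define MAC_REV_OFF 8
    #define SUM_OFF     16

    /* Returns 0 if the byte sum over e[0..SUM_OFF-1] matches the stored
     * checksum and every MAC byte XORs with its inverted copy to 0xFF. */
    static int eeprom_sane(const uint8_t *e)
    {
            uint8_t s = 0;
            size_t i;

            for (i = 0; i < SUM_OFF; i++)
                    s += e[i];
            if (s != e[SUM_OFF])
                    return -1;
            for (i = 0; i < 6; i++)
                    if ((e[MAC_OFF + i] ^ e[MAC_REV_OFF + i]) != 0xFF)
                            return -1;
            return 0;
    }
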
e                1084 drivers/atm/lanai.c 	e(STATUS_SOOL, "SOOL");
e                1085 drivers/atm/lanai.c 	e(STATUS_LOCD, "LOCD");
e                1086 drivers/atm/lanai.c 	e(STATUS_LED, "LED");
e                1087 drivers/atm/lanai.c 	e(STATUS_GPIN, "GPIN");
e                1122 drivers/atm/lanai.c 	e(PCI_STATUS_DETECTED_PARITY, "parity", parity_detect);
e                1123 drivers/atm/lanai.c 	e(PCI_STATUS_SIG_SYSTEM_ERROR, "signalled system", serr_set);
e                1124 drivers/atm/lanai.c 	e(PCI_STATUS_REC_MASTER_ABORT, "master", master_abort);
e                1125 drivers/atm/lanai.c 	e(PCI_STATUS_REC_TARGET_ABORT, "master target", m_target_abort);
e                1126 drivers/atm/lanai.c 	e(PCI_STATUS_SIG_TARGET_ABORT, "slave", s_target_abort);
e                1127 drivers/atm/lanai.c 	e(PCI_STATUS_PARITY, "master parity", master_parity);
e                1200 drivers/atm/lanai.c 	unsigned char *e;
e                1202 drivers/atm/lanai.c 	e = ((unsigned char *) lvcc->tx.buf.ptr) + n;
e                1203 drivers/atm/lanai.c 	m = e - (unsigned char *) lvcc->tx.buf.end;
e                1209 drivers/atm/lanai.c 		e = ((unsigned char *) lvcc->tx.buf.start) + m;
e                1211 drivers/atm/lanai.c 	lvcc->tx.buf.ptr = (u32 *) e;
e                1216 drivers/atm/lanai.c 	unsigned char *e;
e                1220 drivers/atm/lanai.c 	e = ((unsigned char *) lvcc->tx.buf.ptr) + n;
e                1221 drivers/atm/lanai.c 	m = e - (unsigned char *) lvcc->tx.buf.end;
e                1227 drivers/atm/lanai.c 		e = ((unsigned char *) lvcc->tx.buf.start) + m;
e                1229 drivers/atm/lanai.c 	lvcc->tx.buf.ptr = (u32 *) e;
e                1577 drivers/atm/nicstar.c 	int e, r;
e                1588 drivers/atm/nicstar.c 	for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
e                1589 drivers/atm/nicstar.c 		if (card->tste2vc[e] == NULL)
e                1592 drivers/atm/nicstar.c 	if (e == NS_TST_NUM_ENTRIES) {
e                1602 drivers/atm/nicstar.c 		if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
e                1603 drivers/atm/nicstar.c 			card->tste2vc[e] = vc;
e                1604 drivers/atm/nicstar.c 			ns_write_sram(card, new_tst + e, &data, 1);
e                1609 drivers/atm/nicstar.c 		if (++e == NS_TST_NUM_ENTRIES) {
e                1610 drivers/atm/nicstar.c 			e = 0;
e                 210 drivers/auxdisplay/panel.c 		int e;
e                 919 drivers/auxdisplay/panel.c 		lcd.pins.e = PIN_STROBE;
e                 944 drivers/auxdisplay/panel.c 		lcd.pins.e = PIN_AUTOLF;
e                 964 drivers/auxdisplay/panel.c 		lcd.pins.e = PIN_STROBE;
e                 988 drivers/auxdisplay/panel.c 		lcd.pins.e = lcd_e_pin;
e                1021 drivers/auxdisplay/panel.c 		if (lcd.pins.e == PIN_NOT_SET)
e                1022 drivers/auxdisplay/panel.c 			lcd.pins.e = DEFAULT_LCD_PIN_E;
e                1034 drivers/auxdisplay/panel.c 	if (lcd.pins.e == PIN_NOT_SET)
e                1035 drivers/auxdisplay/panel.c 		lcd.pins.e = PIN_NONE;
e                1055 drivers/auxdisplay/panel.c 	pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
e                1739 drivers/auxdisplay/panel.c 		lcd.pins.e = lcd_e_pin;
e                  93 drivers/bcma/driver_chipcommon_sflash.c 	const struct bcma_sflash_tbl_e *e;
e                 110 drivers/bcma/driver_chipcommon_sflash.c 			for (e = bcma_sflash_sst_tbl; e->name; e++) {
e                 111 drivers/bcma/driver_chipcommon_sflash.c 				if (e->id == id2)
e                 118 drivers/bcma/driver_chipcommon_sflash.c 			for (e = bcma_sflash_st_tbl; e->name; e++) {
e                 119 drivers/bcma/driver_chipcommon_sflash.c 				if (e->id == id)
e                 124 drivers/bcma/driver_chipcommon_sflash.c 		if (!e->name) {
e                 134 drivers/bcma/driver_chipcommon_sflash.c 		for (e = bcma_sflash_at_tbl; e->name; e++) {
e                 135 drivers/bcma/driver_chipcommon_sflash.c 			if (e->id == id)
e                 138 drivers/bcma/driver_chipcommon_sflash.c 		if (!e->name) {
e                 149 drivers/bcma/driver_chipcommon_sflash.c 	sflash->blocksize = e->blocksize;
e                 150 drivers/bcma/driver_chipcommon_sflash.c 	sflash->numblocks = e->numblocks;
e                 155 drivers/bcma/driver_chipcommon_sflash.c 		  e->name, sflash->size / 1024, sflash->blocksize,
e                  65 drivers/block/aoe/aoeblk.c 	struct aoeif *ifp, *e;
e                  75 drivers/block/aoe/aoeblk.c 		e = ifp + NAOEIFS;
e                  76 drivers/block/aoe/aoeblk.c 		for (; ifp < e && ifp->nd; ifp++) {
e                 528 drivers/block/aoe/aoecmd.c 	struct aoeif *p, *e;
e                 531 drivers/block/aoe/aoecmd.c 	e = p + NAOEIFS;
e                 532 drivers/block/aoe/aoecmd.c 	for (; p < e; p++)
e                 541 drivers/block/aoe/aoecmd.c 	struct aoeif *e;
e                 546 drivers/block/aoe/aoecmd.c 	e = t->ifs + NAOEIFS - 1;
e                 547 drivers/block/aoe/aoecmd.c 	n = (e - ifp) * sizeof *ifp;
e                 549 drivers/block/aoe/aoecmd.c 	e->nd = NULL;
e                1012 drivers/block/aoe/aoecmd.c 	struct aoetgt **t, **e;
e                1015 drivers/block/aoe/aoecmd.c 	e = t + d->ntargets;
e                1016 drivers/block/aoe/aoecmd.c 	for (; t < e && *t; t++)
e                1476 drivers/block/aoe/aoecmd.c 	struct aoetgt **t, **e;
e                1480 drivers/block/aoe/aoecmd.c 	e = t + d->ntargets;
e                1481 drivers/block/aoe/aoecmd.c 	for (; t < e && *t; t++)
e                1495 drivers/block/aoe/aoecmd.c 	struct aoeif *p, *e;
e                1501 drivers/block/aoe/aoecmd.c 	e = p + NAOEIFS;
e                1502 drivers/block/aoe/aoecmd.c 	for (; p < e; p++) {
e                1512 drivers/block/aoe/aoecmd.c 		if (p == e) {
e                 262 drivers/block/aoe/aoedev.c 	struct aoetgt **t, **e;
e                 285 drivers/block/aoe/aoedev.c 	e = t + d->ntargets;
e                 286 drivers/block/aoe/aoedev.c 	for (; t < e && *t; t++)
e                 318 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                 338 drivers/block/drbd/drbd_actlog.c 	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
e                 343 drivers/block/drbd/drbd_actlog.c 		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
e                 344 drivers/block/drbd/drbd_actlog.c 		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
e                 345 drivers/block/drbd/drbd_actlog.c 		if (e->lc_number != LC_FREE)
e                 347 drivers/block/drbd/drbd_actlog.c 					al_extent_to_bm_page(e->lc_number));
e                 655 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                 667 drivers/block/drbd/drbd_actlog.c 		e = lc_find(device->resync, enr);
e                 669 drivers/block/drbd/drbd_actlog.c 		e = lc_get(device->resync, enr);
e                 670 drivers/block/drbd/drbd_actlog.c 	if (e) {
e                 671 drivers/block/drbd/drbd_actlog.c 		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
e                 904 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                 914 drivers/block/drbd/drbd_actlog.c 	e = lc_get(device->resync, enr);
e                 915 drivers/block/drbd/drbd_actlog.c 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
e                1016 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                1044 drivers/block/drbd/drbd_actlog.c 		e = lc_find(device->resync, device->resync_wenr);
e                1045 drivers/block/drbd/drbd_actlog.c 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
e                1061 drivers/block/drbd/drbd_actlog.c 	e = lc_try_get(device->resync, enr);
e                1062 drivers/block/drbd/drbd_actlog.c 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
e                1082 drivers/block/drbd/drbd_actlog.c 		e = lc_get(device->resync, enr);
e                1083 drivers/block/drbd/drbd_actlog.c 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
e                1137 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                1142 drivers/block/drbd/drbd_actlog.c 	e = lc_find(device->resync, enr);
e                1143 drivers/block/drbd/drbd_actlog.c 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
e                1195 drivers/block/drbd/drbd_actlog.c 	struct lc_element *e;
e                1204 drivers/block/drbd/drbd_actlog.c 			e = lc_element_by_index(device->resync, i);
e                1205 drivers/block/drbd/drbd_actlog.c 			bm_ext = lc_entry(e, struct bm_extent, lce);
e                1352 drivers/block/drbd/drbd_bitmap.c 	unsigned long e, int val)
e                1361 drivers/block/drbd/drbd_bitmap.c 	if (e >= b->bm_bits) {
e                1363 drivers/block/drbd/drbd_bitmap.c 				s, e, b->bm_bits);
e                1364 drivers/block/drbd/drbd_bitmap.c 		e = b->bm_bits ? b->bm_bits -1 : 0;
e                1366 drivers/block/drbd/drbd_bitmap.c 	for (bitnr = s; bitnr <= e; bitnr++) {
e                1401 drivers/block/drbd/drbd_bitmap.c 	const unsigned long e, int val)
e                1416 drivers/block/drbd/drbd_bitmap.c 	c = __bm_change_bits_to(device, s, e, val);
e                1423 drivers/block/drbd/drbd_bitmap.c int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
e                1425 drivers/block/drbd/drbd_bitmap.c 	return bm_change_bits_to(device, s, e, 1);
e                1429 drivers/block/drbd/drbd_bitmap.c int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
e                1431 drivers/block/drbd/drbd_bitmap.c 	return -bm_change_bits_to(device, s, e, 0);
e                1466 drivers/block/drbd/drbd_bitmap.c void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
e                1478 drivers/block/drbd/drbd_bitmap.c 	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
e                1485 drivers/block/drbd/drbd_bitmap.c 	if (e - s <= 3*BITS_PER_LONG) {
e                1488 drivers/block/drbd/drbd_bitmap.c 		__bm_change_bits_to(device, s, e, 1);
e                1534 drivers/block/drbd/drbd_bitmap.c 	if (el <= e)
e                1535 drivers/block/drbd/drbd_bitmap.c 		__bm_change_bits_to(device, el, e, 1);
e                1577 drivers/block/drbd/drbd_bitmap.c int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
e                1598 drivers/block/drbd/drbd_bitmap.c 	for (bitnr = s; bitnr <= e; bitnr++) {
e                1635 drivers/block/drbd/drbd_bitmap.c 	int count, s, e;
e                1649 drivers/block/drbd/drbd_bitmap.c 	e = min((size_t)S2W(enr+1), b->bm_words);
e                1652 drivers/block/drbd/drbd_bitmap.c 		int n = e-s;
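
_drbd_bm_set_bits() above shows the classic word-filling fast path for setting a bit range [s, e]: ranges of at most 3*BITS_PER_LONG bits are set bit by bit, otherwise the partial head and tail words are handled individually while every fully covered word is written as ~0UL in one store (el above is exactly that last full-word boundary). A self-contained sketch of the strategy, assuming the caller sized bits[] appropriately:

    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void set_bit_slow(unsigned long *bits, unsigned long i)
    {
            bits[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }

    /* Set bits s..e inclusive, filling whole words in the middle. */
    static void set_bits(unsigned long *bits, unsigned long s, unsigned long e)
    {
            /* first bit of the first fully covered word */
            unsigned long sl = (s + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
            /* first bit after the last fully covered word */
            unsigned long el = (e + 1) & ~(BITS_PER_LONG - 1);
            unsigned long i;

            if (e - s <= 3 * BITS_PER_LONG || sl >= el) {
                    for (i = s; i <= e; i++)        /* short range */
                            set_bit_slow(bits, i);
                    return;
            }
            for (i = s; i < sl; i++)                /* partial head word */
                    set_bit_slow(bits, i);
            for (i = sl; i < el; i += BITS_PER_LONG)
                    bits[i / BITS_PER_LONG] = ~0UL; /* full words, one store */
            for (i = el; i <= e; i++)               /* partial tail word */
                    set_bit_slow(bits, i);
    }
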
e                 649 drivers/block/drbd/drbd_debugfs.c static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
e                 651 drivers/block/drbd/drbd_debugfs.c        struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);
e                1349 drivers/block/drbd/drbd_int.h 		struct drbd_device *device, unsigned long s, unsigned long e);
e                1351 drivers/block/drbd/drbd_int.h 		struct drbd_device *device, unsigned long s, unsigned long e);
e                1353 drivers/block/drbd/drbd_int.h 	struct drbd_device *device, const unsigned long s, const unsigned long e);
e                1357 drivers/block/drbd/drbd_int.h 		const unsigned long s, const unsigned long e);
e                1566 drivers/block/drbd/drbd_int.h #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
e                1567 drivers/block/drbd/drbd_int.h #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
e                1809 drivers/block/drbd/drbd_int.h #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
e                1149 drivers/block/drbd/drbd_nl.c 	struct lc_element *e;
e                1169 drivers/block/drbd/drbd_nl.c 			e = lc_element_by_index(t, i);
e                1170 drivers/block/drbd/drbd_nl.c 			if (e->refcnt)
e                1172 drivers/block/drbd/drbd_nl.c 				    e->lc_number, e->refcnt);
e                1173 drivers/block/drbd/drbd_nl.c 			in_use += e->refcnt;
e                4741 drivers/block/drbd/drbd_receiver.c 	unsigned long e;
e                4758 drivers/block/drbd/drbd_receiver.c 			e = s + rl -1;
e                4759 drivers/block/drbd/drbd_receiver.c 			if (e >= c->bm_bits) {
e                4760 drivers/block/drbd/drbd_receiver.c 				drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
e                4763 drivers/block/drbd/drbd_receiver.c 			_drbd_bm_set_bits(peer_device->device, s, e);
e                 280 drivers/block/paride/bpck.c {	int i, e, l, h, om;
e                 343 drivers/block/paride/bpck.c 	e = 0;
e                 344 drivers/block/paride/bpck.c 	for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++;
e                 345 drivers/block/paride/bpck.c 	return e;
e                 254 drivers/block/paride/epat.c 	int	e[2] = {0,0};
e                 266 drivers/block/paride/epat.c                 if (RRi(2) != (k^0xaa)) e[j]++;
e                 284 drivers/block/paride/epat.c 		   pi->device,pi->port,pi->mode,cc,e[0],e[1],f);
e                 287 drivers/block/paride/epat.c         return (e[0] && e[1]) || f;
e                 240 drivers/block/paride/epia.c 	int	e[2] = {0,0};
e                 248 drivers/block/paride/epia.c                 if (RR(2) != (k^0xaa)) e[j]++;
e                 267 drivers/block/paride/epia.c                    pi->device,pi->port,pi->mode,e[0],e[1],f);
e                 270 drivers/block/paride/epia.c         return (e[0] && e[1]) || f;
e                 186 drivers/block/paride/friq.c 	int	e[2] = {0,0};
e                 199 drivers/block/paride/friq.c                         if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
e                 212 drivers/block/paride/friq.c                    pi->device,pi->port,pi->mode,e[0],e[1],r);
e                 215 drivers/block/paride/friq.c         return (r || (e[0] && e[1]));
e                 227 drivers/block/paride/frpw.c 	int	e[2] = {0,0};
e                 252 drivers/block/paride/frpw.c                         if (frpw_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
e                 265 drivers/block/paride/frpw.c                    pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r);
e                 268 drivers/block/paride/frpw.c         return (r || (e[0] && e[1]));
e                 173 drivers/block/paride/paride.c 	int e[2] = { 0, 0 };
e                 183 drivers/block/paride/paride.c 				e[j]++;
e                 191 drivers/block/paride/paride.c 		       pi->mode, e[0], e[1]);
e                 193 drivers/block/paride/paride.c 	return (e[0] && e[1]);	/* not here if both > 0 */
e                 307 drivers/block/paride/paride.c 	int max, s, e;
e                 310 drivers/block/paride/paride.c 	e = s + 1;
e                 314 drivers/block/paride/paride.c 		e = pi->proto->max_units;
e                 329 drivers/block/paride/paride.c 		for (pi->unit = s; pi->unit < e; pi->unit++)
e                 354 drivers/block/paride/paride.c 	int p, k, s, e;
e                 358 drivers/block/paride/paride.c 	e = s + 1;
e                 365 drivers/block/paride/paride.c 		e = MAX_PROTOS;
e                 373 drivers/block/paride/paride.c 	for (p = s; p < e; p++) {
e                 379 drivers/block/paride/pcd.c 	int j, r, e, s, p;
e                 388 drivers/block/paride/pcd.c 		e = read_reg(cd, 1);
e                 391 drivers/block/paride/pcd.c 			e |= 0x100;
e                 395 drivers/block/paride/pcd.c 			       cd->name, fun, msg, r, s, e, j, p);
e                 309 drivers/block/paride/pd.c 	int k, r, e;
e                 319 drivers/block/paride/pd.c 	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
e                 321 drivers/block/paride/pd.c 		e |= ERR_TMO;
e                 322 drivers/block/paride/pd.c 	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
e                 323 drivers/block/paride/pd.c 		pd_print_error(disk, msg, e);
e                 324 drivers/block/paride/pd.c 	return e;
e                 428 drivers/block/paride/pf.c 	int j, r, e, s, p;
e                 437 drivers/block/paride/pf.c 		e = read_reg(pf, 1);
e                 440 drivers/block/paride/pf.c 			e |= 0x100;
e                 444 drivers/block/paride/pf.c 			       pf->name, fun, msg, r, s, e, j, p);
e                 445 drivers/block/paride/pf.c 		return (e << 8) + s;
e                 290 drivers/block/paride/pg.c 	int j, r, e, s, p, to;
e                 307 drivers/block/paride/pg.c 		e = read_reg(dev, 1);
e                 311 drivers/block/paride/pg.c 			       dev->name, msg, s, e, p, to ? " timeout" : "");
e                 313 drivers/block/paride/pg.c 			e |= 0x100;
e                 314 drivers/block/paride/pg.c 		dev->status = (e >> 4) & 0xff;
e                 274 drivers/block/paride/pt.c 	int j, r, e, s, p;
e                 284 drivers/block/paride/pt.c 		e = read_reg(pi, 1);
e                 287 drivers/block/paride/pt.c 			e |= 0x100;
e                 291 drivers/block/paride/pt.c 			       tape->name, fun, msg, r, s, e, j, p);
e                 292 drivers/block/paride/pt.c 		return (e << 8) + s;
e                 397 drivers/block/paride/pt.c 	int k, e, s;
e                 400 drivers/block/paride/pt.c 	e = 0;
e                 408 drivers/block/paride/pt.c 		e = read_reg(pi, 1);
e                 418 drivers/block/paride/pt.c 			       e);
e                1851 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info *e;
e                1853 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry(e, &smi_infos, link) {
e                1854 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.addr_space != info->io.addr_space)
e                1856 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.addr_data == info->io.addr_data) {
e                1862 drivers/char/ipmi/ipmi_si_intf.c 			if (info->io.slave_addr && !e->io.slave_addr)
e                1863 drivers/char/ipmi/ipmi_si_intf.c 				e->io.slave_addr = info->io.slave_addr;
e                1864 drivers/char/ipmi/ipmi_si_intf.c 			return e;
e                2094 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info *e;
e                2115 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry(e, &smi_infos, link) {
e                2119 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.irq && (!type || e->io.addr_source == type)) {
e                2120 drivers/char/ipmi/ipmi_si_intf.c 			if (!try_smi_init(e)) {
e                2121 drivers/char/ipmi/ipmi_si_intf.c 				type = e->io.addr_source;
e                2132 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry(e, &smi_infos, link) {
e                2133 drivers/char/ipmi/ipmi_si_intf.c 		if (!e->io.irq && (!type || e->io.addr_source == type)) {
e                2134 drivers/char/ipmi/ipmi_si_intf.c 			if (!try_smi_init(e)) {
e                2135 drivers/char/ipmi/ipmi_si_intf.c 				type = e->io.addr_source;
e                2241 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info *e;
e                2245 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry(e, &smi_infos, link) {
e                2246 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.dev == dev) {
e                2247 drivers/char/ipmi/ipmi_si_intf.c 			cleanup_one_si(e);
e                2261 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info *e, *tmp_e;
e                2265 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
e                2266 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.addr_space != addr_space)
e                2268 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.si_type != si_type)
e                2270 drivers/char/ipmi/ipmi_si_intf.c 		if (e->io.addr_data == addr) {
e                2271 drivers/char/ipmi/ipmi_si_intf.c 			dev = get_device(e->io.dev);
e                2272 drivers/char/ipmi/ipmi_si_intf.c 			cleanup_one_si(e);
e                2282 drivers/char/ipmi/ipmi_si_intf.c 	struct smi_info *e, *tmp_e;
e                2294 drivers/char/ipmi/ipmi_si_intf.c 	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
e                2295 drivers/char/ipmi/ipmi_si_intf.c 		cleanup_one_si(e);
e                 195 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
e                 231 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
e                 242 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
e                 282 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
e                 293 drivers/clk/meson/axg-audio.c static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
e                 304 drivers/clk/meson/axg-audio.c static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
e                  61 drivers/clocksource/arm_arch_timer.c #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
e                 136 drivers/connector/cn_proc.c 		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
e                 139 drivers/connector/cn_proc.c 		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
e                  41 drivers/counter/counter.c 	const struct counter_signal_enum_ext *const e = priv;
e                  45 drivers/counter/counter.c 	if (!e->get)
e                  48 drivers/counter/counter.c 	err = e->get(counter, signal, &index);
e                  52 drivers/counter/counter.c 	if (index >= e->num_items)
e                  55 drivers/counter/counter.c 	return sprintf(buf, "%s\n", e->items[index]);
e                  63 drivers/counter/counter.c 	const struct counter_signal_enum_ext *const e = priv;
e                  67 drivers/counter/counter.c 	if (!e->set)
e                  70 drivers/counter/counter.c 	index = __sysfs_match_string(e->items, e->num_items, buf);
e                  74 drivers/counter/counter.c 	err = e->set(counter, signal, index);
e                  86 drivers/counter/counter.c 	const struct counter_signal_enum_ext *const e = priv;
e                  90 drivers/counter/counter.c 	if (!e->num_items)
e                  93 drivers/counter/counter.c 	for (i = 0; i < e->num_items; i++)
e                  94 drivers/counter/counter.c 		len += sprintf(buf + len, "%s\n", e->items[i]);
e                 104 drivers/counter/counter.c 	const struct counter_count_enum_ext *const e = priv;
e                 108 drivers/counter/counter.c 	if (!e->get)
e                 111 drivers/counter/counter.c 	err = e->get(counter, count, &index);
e                 115 drivers/counter/counter.c 	if (index >= e->num_items)
e                 118 drivers/counter/counter.c 	return sprintf(buf, "%s\n", e->items[index]);
e                 126 drivers/counter/counter.c 	const struct counter_count_enum_ext *const e = priv;
e                 130 drivers/counter/counter.c 	if (!e->set)
e                 133 drivers/counter/counter.c 	index = __sysfs_match_string(e->items, e->num_items, buf);
e                 137 drivers/counter/counter.c 	err = e->set(counter, count, index);
e                 149 drivers/counter/counter.c 	const struct counter_count_enum_ext *const e = priv;
e                 153 drivers/counter/counter.c 	if (!e->num_items)
e                 156 drivers/counter/counter.c 	for (i = 0; i < e->num_items; i++)
e                 157 drivers/counter/counter.c 		len += sprintf(buf + len, "%s\n", e->items[i]);
e                 166 drivers/counter/counter.c 	const struct counter_device_enum_ext *const e = priv;
e                 170 drivers/counter/counter.c 	if (!e->get)
e                 173 drivers/counter/counter.c 	err = e->get(counter, &index);
e                 177 drivers/counter/counter.c 	if (index >= e->num_items)
e                 180 drivers/counter/counter.c 	return sprintf(buf, "%s\n", e->items[index]);
e                 187 drivers/counter/counter.c 	const struct counter_device_enum_ext *const e = priv;
e                 191 drivers/counter/counter.c 	if (!e->set)
e                 194 drivers/counter/counter.c 	index = __sysfs_match_string(e->items, e->num_items, buf);
e                 198 drivers/counter/counter.c 	err = e->set(counter, index);
e                 209 drivers/counter/counter.c 	const struct counter_device_enum_ext *const e = priv;
e                 213 drivers/counter/counter.c 	if (!e->num_items)
e                 216 drivers/counter/counter.c 	for (i = 0; i < e->num_items; i++)
e                 217 drivers/counter/counter.c 		len += sprintf(buf + len, "%s\n", e->items[i]);
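
The three counter.c triples above (signal, count and device variants) all follow one sysfs enum pattern: the read callback asks the driver for an index and prints e->items[index], the write callback maps the buffer back to an index with __sysfs_match_string(), and a matching "_available" attribute lists every item. A userspace sketch of the same shape, with a strcmp() loop standing in for __sysfs_match_string() (which also tolerates the trailing newline sysfs buffers carry) and invented "rising/falling/both" items:

    #include <stdio.h>
    #include <string.h>

    static int match_string(const char *const *items, size_t n, const char *buf)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (!strcmp(items[i], buf))
                            return (int)i;
            return -1;              /* the kernel returns -EINVAL */
    }

    static const char *const modes[] = { "rising", "falling", "both" };
    static size_t cur;

    static int mode_show(char *buf)         /* read: index -> string */
    {
            return sprintf(buf, "%s\n", modes[cur]);
    }

    static int mode_store(const char *buf)  /* write: string -> index */
    {
            int i = match_string(modes, 3, buf);

            if (i < 0)
                    return i;
            cur = (size_t)i;
            return 0;
    }

    static int mode_available(char *buf)    /* list every item */
    {
            int len = 0;
            size_t i;

            for (i = 0; i < 3; i++)
                    len += sprintf(buf + len, "%s\n", modes[i]);
            return len;
    }
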
e                 379 drivers/crypto/caam/caampkc.c 	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
e                 633 drivers/crypto/caam/caampkc.c 	if (unlikely(!key->n || !key->e))
e                 802 drivers/crypto/caam/caampkc.c 	kfree(key->e);
e                 885 drivers/crypto/caam/caampkc.c 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
e                 886 drivers/crypto/caam/caampkc.c 	if (!rsa_key->e)
e                 991 drivers/crypto/caam/caampkc.c 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
e                 992 drivers/crypto/caam/caampkc.c 	if (!rsa_key->e)
e                  71 drivers/crypto/caam/caampkc.h 	u8 *e;
e                 121 drivers/crypto/cavium/cpt/cptvf_algs.c 	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
e                 122 drivers/crypto/cavium/cpt/cptvf_algs.c 	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
e                 123 drivers/crypto/cavium/cpt/cptvf_algs.c 	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;
e                  74 drivers/crypto/cavium/cpt/cptvf_algs.h 	} e;
e                 157 drivers/crypto/ccp/ccp-crypto-rsa.c 					raw_key.e, raw_key.e_sz);
e                  97 drivers/crypto/ccp/ccp-dev.c void ccp_log_error(struct ccp_device *d, unsigned int e)
e                  99 drivers/crypto/ccp/ccp-dev.c 	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
e                 102 drivers/crypto/ccp/ccp-dev.c 	if (e < ARRAY_SIZE(ccp_error_codes))
e                 103 drivers/crypto/ccp/ccp-dev.c 		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
e                 105 drivers/crypto/ccp/ccp-dev.c 		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
e                1855 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct l2t_entry *e = csk->l2t_entry;
e                1857 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (e && sk->sk_state != TCP_SYN_RECV) {
e                1858 drivers/crypto/chelsio/chtls/chtls_cm.c 			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
e                1901 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct l2t_entry *e = csk->l2t_entry;
e                1903 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (e && sk->sk_state != TCP_SYN_RECV) {
e                1904 drivers/crypto/chelsio/chtls/chtls_cm.c 			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
e                  71 drivers/crypto/qat/qat_common/qat_asym_algs.c 			dma_addr_t e;
e                 105 drivers/crypto/qat/qat_common/qat_asym_algs.c 	char *e;
e                 697 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (unlikely(!ctx->n || !ctx->e))
e                 719 drivers/crypto/qat/qat_common/qat_asym_algs.c 	qat_req->in.rsa.enc.e = ctx->dma_e;
e                1017 drivers/crypto/qat/qat_common/qat_asym_algs.c 		ctx->e = NULL;
e                1021 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
e                1022 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (!ctx->e)
e                1025 drivers/crypto/qat/qat_common/qat_asym_algs.c 	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
e                1162 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (ctx->e)
e                1163 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
e                1190 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->e = NULL;
e                1221 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
e                1231 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (!ctx->n || !ctx->e) {
e                1288 drivers/crypto/qat/qat_common/qat_asym_algs.c 	if (ctx->e)
e                1289 drivers/crypto/qat/qat_common/qat_asym_algs.c 		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
e                1296 drivers/crypto/qat/qat_common/qat_asym_algs.c 	ctx->e = NULL;
e                1048 drivers/edac/edac_mc.c 			      struct edac_raw_error_desc *e)
e                1051 drivers/edac/edac_mc.c 	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
e                1057 drivers/edac/edac_mc.c 			e->page_frame_number, e->offset_in_page,
e                1058 drivers/edac/edac_mc.c 			e->grain, e->syndrome);
e                1059 drivers/edac/edac_mc.c 		edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
e                1060 drivers/edac/edac_mc.c 			      detail, e->other_detail, e->enable_per_layer_report,
e                1061 drivers/edac/edac_mc.c 			      e->page_frame_number, e->offset_in_page, e->grain);
e                1065 drivers/edac/edac_mc.c 			e->page_frame_number, e->offset_in_page, e->grain);
e                1067 drivers/edac/edac_mc.c 		edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
e                1068 drivers/edac/edac_mc.c 			      detail, e->other_detail, e->enable_per_layer_report);
e                1092 drivers/edac/edac_mc.c 	struct edac_raw_error_desc *e = &mci->error_desc;
e                1097 drivers/edac/edac_mc.c 	memset(e, 0, sizeof (*e));
e                1098 drivers/edac/edac_mc.c 	e->error_count = error_count;
e                1099 drivers/edac/edac_mc.c 	e->top_layer = top_layer;
e                1100 drivers/edac/edac_mc.c 	e->mid_layer = mid_layer;
e                1101 drivers/edac/edac_mc.c 	e->low_layer = low_layer;
e                1102 drivers/edac/edac_mc.c 	e->page_frame_number = page_frame_number;
e                1103 drivers/edac/edac_mc.c 	e->offset_in_page = offset_in_page;
e                1104 drivers/edac/edac_mc.c 	e->syndrome = syndrome;
e                1105 drivers/edac/edac_mc.c 	e->msg = msg;
e                1106 drivers/edac/edac_mc.c 	e->other_detail = other_detail;
e                1130 drivers/edac/edac_mc.c 			e->enable_per_layer_report = true;
e                1144 drivers/edac/edac_mc.c 	p = e->label;
e                1158 drivers/edac/edac_mc.c 		if (dimm->grain > e->grain)
e                1159 drivers/edac/edac_mc.c 			e->grain = dimm->grain;
e                1167 drivers/edac/edac_mc.c 		if (e->enable_per_layer_report && dimm->nr_pages) {
e                1169 drivers/edac/edac_mc.c 				e->enable_per_layer_report = false;
e                1173 drivers/edac/edac_mc.c 			if (p != e->label) {
e                1200 drivers/edac/edac_mc.c 	if (!e->enable_per_layer_report) {
e                1201 drivers/edac/edac_mc.c 		strcpy(e->label, "any memory");
e                1204 drivers/edac/edac_mc.c 		if (p == e->label)
e                1205 drivers/edac/edac_mc.c 			strcpy(e->label, "unknown memory");
e                1218 drivers/edac/edac_mc.c 	p = e->location;
e                1228 drivers/edac/edac_mc.c 	if (p > e->location)
e                1232 drivers/edac/edac_mc.c 	if (WARN_ON_ONCE(!e->grain))
e                1233 drivers/edac/edac_mc.c 		e->grain = 1;
e                1235 drivers/edac/edac_mc.c 	grain_bits = fls_long(e->grain - 1);
e                1239 drivers/edac/edac_mc.c 		trace_mc_event(type, e->msg, e->label, e->error_count,
e                1240 drivers/edac/edac_mc.c 			       mci->mc_idx, e->top_layer, e->mid_layer,
e                1241 drivers/edac/edac_mc.c 			       e->low_layer,
e                1242 drivers/edac/edac_mc.c 			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
e                1243 drivers/edac/edac_mc.c 			       grain_bits, e->syndrome, e->other_detail);
e                1245 drivers/edac/edac_mc.c 	edac_raw_mc_handle_error(type, mci, e);
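
Both EDAC paths normalize the reporting granularity the same way before tracing: a zero grain is forced to 1, and grain_bits = fls_long(e->grain - 1) is the power-of-two exponent the grain rounds up to. A tiny sketch of that rounding, with a loop standing in for fls_long():

    #include <stdio.h>

    static int fls_long(unsigned long v)    /* highest set bit, 1-based */
    {
            int n = 0;

            while (v) {
                    n++;
                    v >>= 1;
            }
            return n;
    }

    int main(void)
    {
            /* fls_long(grain - 1) == ceil(log2(grain)):
             * 1 -> 0, 32 -> 5, 33 -> 6, 64 -> 6. */
            unsigned long grain[] = { 1, 32, 33, 64 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("grain %2lu -> grain_bits %d\n",
                           grain[i], fls_long(grain[i] - 1));
            return 0;
    }
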
e                 225 drivers/edac/edac_mc.h 			      struct edac_raw_error_desc *e);
e                 207 drivers/edac/ghes_edac.c 	struct edac_raw_error_desc *e;
e                 229 drivers/edac/ghes_edac.c 	e = &mci->error_desc;
e                 232 drivers/edac/ghes_edac.c 	memset(e, 0, sizeof (*e));
e                 233 drivers/edac/ghes_edac.c 	e->error_count = 1;
e                 234 drivers/edac/ghes_edac.c 	e->grain = 1;
e                 235 drivers/edac/ghes_edac.c 	strcpy(e->label, "unknown label");
e                 236 drivers/edac/ghes_edac.c 	e->msg = pvt->msg;
e                 237 drivers/edac/ghes_edac.c 	e->other_detail = pvt->other_detail;
e                 238 drivers/edac/ghes_edac.c 	e->top_layer = -1;
e                 239 drivers/edac/ghes_edac.c 	e->mid_layer = -1;
e                 240 drivers/edac/ghes_edac.c 	e->low_layer = -1;
e                 324 drivers/edac/ghes_edac.c 		e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
e                 325 drivers/edac/ghes_edac.c 		e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
e                 330 drivers/edac/ghes_edac.c 		e->grain = ~mem_err->physical_addr_mask + 1;
e                 333 drivers/edac/ghes_edac.c 	p = e->location;
e                 363 drivers/edac/ghes_edac.c 			e->top_layer = index;
e                 364 drivers/edac/ghes_edac.c 			e->enable_per_layer_report = true;
e                 368 drivers/edac/ghes_edac.c 	if (p > e->location)
e                 447 drivers/edac/ghes_edac.c 	if (WARN_ON_ONCE(!e->grain))
e                 448 drivers/edac/ghes_edac.c 		e->grain = 1;
e                 450 drivers/edac/ghes_edac.c 	grain_bits = fls_long(e->grain - 1);
e                 454 drivers/edac/ghes_edac.c 		 "APEI location: %s %s", e->location, e->other_detail);
e                 455 drivers/edac/ghes_edac.c 	trace_mc_event(type, e->msg, e->label, e->error_count,
e                 456 drivers/edac/ghes_edac.c 		       mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
e                 457 drivers/edac/ghes_edac.c 		       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
e                 458 drivers/edac/ghes_edac.c 		       grain_bits, e->syndrome, pvt->detail_location);
e                 460 drivers/edac/ghes_edac.c 	edac_raw_mc_handle_error(type, mci, e);
e                 377 drivers/firewire/core-cdev.c 	struct bus_reset_event *e;
e                 379 drivers/firewire/core-cdev.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                 380 drivers/firewire/core-cdev.c 	if (e == NULL)
e                 383 drivers/firewire/core-cdev.c 	fill_bus_reset_event(&e->reset, client);
e                 385 drivers/firewire/core-cdev.c 	queue_event(client, &e->event,
e                 386 drivers/firewire/core-cdev.c 		    &e->reset, sizeof(e->reset), NULL, 0);
e                 538 drivers/firewire/core-cdev.c 	struct outbound_transaction_event *e = data;
e                 539 drivers/firewire/core-cdev.c 	struct fw_cdev_event_response *rsp = &e->response;
e                 540 drivers/firewire/core-cdev.c 	struct client *client = e->client;
e                 549 drivers/firewire/core-cdev.c 	idr_remove(&client->resource_idr, e->r.resource.handle);
e                 565 drivers/firewire/core-cdev.c 		queue_event(client, &e->event, rsp, sizeof(*rsp),
e                 568 drivers/firewire/core-cdev.c 		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
e                 579 drivers/firewire/core-cdev.c 	struct outbound_transaction_event *e;
e                 590 drivers/firewire/core-cdev.c 	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
e                 591 drivers/firewire/core-cdev.c 	if (e == NULL)
e                 594 drivers/firewire/core-cdev.c 	e->client = client;
e                 595 drivers/firewire/core-cdev.c 	e->response.length = request->length;
e                 596 drivers/firewire/core-cdev.c 	e->response.closure = request->closure;
e                 599 drivers/firewire/core-cdev.c 	    copy_from_user(e->response.data,
e                 605 drivers/firewire/core-cdev.c 	e->r.resource.release = release_transaction;
e                 606 drivers/firewire/core-cdev.c 	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
e                 610 drivers/firewire/core-cdev.c 	fw_send_request(client->device->card, &e->r.transaction,
e                 612 drivers/firewire/core-cdev.c 			speed, request->offset, e->response.data,
e                 613 drivers/firewire/core-cdev.c 			request->length, complete_transaction, e);
e                 617 drivers/firewire/core-cdev.c 	kfree(e);
e                 672 drivers/firewire/core-cdev.c 	struct inbound_transaction_event *e;
e                 681 drivers/firewire/core-cdev.c 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
e                 682 drivers/firewire/core-cdev.c 	if (r == NULL || e == NULL)
e                 708 drivers/firewire/core-cdev.c 		struct fw_cdev_event_request *req = &e->req.request;
e                 721 drivers/firewire/core-cdev.c 		struct fw_cdev_event_request2 *req = &e->req.request2;
e                 736 drivers/firewire/core-cdev.c 	queue_event(handler->client, &e->event,
e                 737 drivers/firewire/core-cdev.c 		    &e->req, event_size0, r->data, length);
e                 742 drivers/firewire/core-cdev.c 	kfree(e);
e                 915 drivers/firewire/core-cdev.c 	struct iso_interrupt_event *e;
e                 917 drivers/firewire/core-cdev.c 	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
e                 918 drivers/firewire/core-cdev.c 	if (e == NULL)
e                 921 drivers/firewire/core-cdev.c 	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
e                 922 drivers/firewire/core-cdev.c 	e->interrupt.closure   = client->iso_closure;
e                 923 drivers/firewire/core-cdev.c 	e->interrupt.cycle     = cycle;
e                 924 drivers/firewire/core-cdev.c 	e->interrupt.header_length = header_length;
e                 925 drivers/firewire/core-cdev.c 	memcpy(e->interrupt.header, header, header_length);
e                 926 drivers/firewire/core-cdev.c 	queue_event(client, &e->event, &e->interrupt,
e                 927 drivers/firewire/core-cdev.c 		    sizeof(e->interrupt) + header_length, NULL, 0);
e                 934 drivers/firewire/core-cdev.c 	struct iso_interrupt_mc_event *e;
e                 936 drivers/firewire/core-cdev.c 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
e                 937 drivers/firewire/core-cdev.c 	if (e == NULL)
e                 940 drivers/firewire/core-cdev.c 	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
e                 941 drivers/firewire/core-cdev.c 	e->interrupt.closure   = client->iso_closure;
e                 942 drivers/firewire/core-cdev.c 	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
e                 944 drivers/firewire/core-cdev.c 	queue_event(client, &e->event, &e->interrupt,
e                 945 drivers/firewire/core-cdev.c 		    sizeof(e->interrupt), NULL, 0);
e                1236 drivers/firewire/core-cdev.c 	struct iso_resource_event *e;
e                1310 drivers/firewire/core-cdev.c 		e = r->e_alloc;
e                1313 drivers/firewire/core-cdev.c 		e = r->e_dealloc;
e                1316 drivers/firewire/core-cdev.c 	e->iso_resource.handle    = r->resource.handle;
e                1317 drivers/firewire/core-cdev.c 	e->iso_resource.channel   = channel;
e                1318 drivers/firewire/core-cdev.c 	e->iso_resource.bandwidth = bandwidth;
e                1320 drivers/firewire/core-cdev.c 	queue_event(client, &e->event,
e                1321 drivers/firewire/core-cdev.c 		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
e                1483 drivers/firewire/core-cdev.c 	struct outbound_phy_packet_event *e =
e                1488 drivers/firewire/core-cdev.c 	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
e                1490 drivers/firewire/core-cdev.c 	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
e                1493 drivers/firewire/core-cdev.c 	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
e                1494 drivers/firewire/core-cdev.c 	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
e                1495 drivers/firewire/core-cdev.c 	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
e                1497 drivers/firewire/core-cdev.c 	default:		e->phy_packet.rcode = status;		break;
e                1499 drivers/firewire/core-cdev.c 	e->phy_packet.data[0] = packet->timestamp;
e                1501 drivers/firewire/core-cdev.c 	queue_event(e->client, &e->event, &e->phy_packet,
e                1502 drivers/firewire/core-cdev.c 		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
e                1503 drivers/firewire/core-cdev.c 	client_put(e->client);
e                1510 drivers/firewire/core-cdev.c 	struct outbound_phy_packet_event *e;
e                1516 drivers/firewire/core-cdev.c 	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
e                1517 drivers/firewire/core-cdev.c 	if (e == NULL)
e                1521 drivers/firewire/core-cdev.c 	e->client		= client;
e                1522 drivers/firewire/core-cdev.c 	e->p.speed		= SCODE_100;
e                1523 drivers/firewire/core-cdev.c 	e->p.generation		= a->generation;
e                1524 drivers/firewire/core-cdev.c 	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
e                1525 drivers/firewire/core-cdev.c 	e->p.header[1]		= a->data[0];
e                1526 drivers/firewire/core-cdev.c 	e->p.header[2]		= a->data[1];
e                1527 drivers/firewire/core-cdev.c 	e->p.header_length	= 12;
e                1528 drivers/firewire/core-cdev.c 	e->p.callback		= outbound_phy_packet_callback;
e                1529 drivers/firewire/core-cdev.c 	e->phy_packet.closure	= a->closure;
e                1530 drivers/firewire/core-cdev.c 	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
e                1532 drivers/firewire/core-cdev.c 			e->phy_packet.length = 4;
e                1534 drivers/firewire/core-cdev.c 	card->driver->send_request(card, &e->p);
e                1561 drivers/firewire/core-cdev.c 	struct inbound_phy_packet_event *e;
e                1567 drivers/firewire/core-cdev.c 		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
e                1568 drivers/firewire/core-cdev.c 		if (e == NULL)
e                1571 drivers/firewire/core-cdev.c 		e->phy_packet.closure	= client->phy_receiver_closure;
e                1572 drivers/firewire/core-cdev.c 		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
e                1573 drivers/firewire/core-cdev.c 		e->phy_packet.rcode	= RCODE_COMPLETE;
e                1574 drivers/firewire/core-cdev.c 		e->phy_packet.length	= 8;
e                1575 drivers/firewire/core-cdev.c 		e->phy_packet.data[0]	= p->header[1];
e                1576 drivers/firewire/core-cdev.c 		e->phy_packet.data[1]	= p->header[2];
e                1577 drivers/firewire/core-cdev.c 		queue_event(client, &e->event,
e                1578 drivers/firewire/core-cdev.c 			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
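
Every core-cdev event above is built with the same single-allocation idiom: kmalloc(sizeof(*e) + payload) reserves the fixed struct and its variable tail together, the header fields are filled in, and queue_event() is then pointed at the embedded payload with its total size. A sketch of that flexible-payload pattern (the queueing itself is omitted):

    #include <stdlib.h>
    #include <string.h>

    struct iso_interrupt_event {
            int type;
            unsigned int cycle;
            size_t header_length;
            unsigned char header[];         /* flexible trailing payload */
    };

    /* One allocation covers the fixed part and the variable header,
     * mirroring kmalloc(sizeof(*e) + header_length, GFP_ATOMIC). */
    static struct iso_interrupt_event *
    make_iso_event(unsigned int cycle, const void *hdr, size_t len)
    {
            struct iso_interrupt_event *e = malloc(sizeof(*e) + len);

            if (!e)
                    return NULL;
            e->type = 0;                    /* illustrative type code */
            e->cycle = cycle;
            e->header_length = len;
            memcpy(e->header, hdr, len);
            return e;
    }
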
e                 172 drivers/firmware/arm_sdei.c 	struct sdei_event *e, *found = NULL;
e                 177 drivers/firmware/arm_sdei.c 	list_for_each_entry(e, &sdei_list, list) {
e                 178 drivers/firmware/arm_sdei.c 		if (e->event_num == event_num) {
e                 179 drivers/firmware/arm_sdei.c 			found = e;
e                 987 drivers/firmware/dmi_scan.c 	char *e;
e                1005 drivers/firmware/dmi_scan.c 	year = simple_strtoul(y, &e, 10);
e                1006 drivers/firmware/dmi_scan.c 	if (y != e && year < 100) {	/* 2-digit year */
e                1015 drivers/firmware/dmi_scan.c 	month = simple_strtoul(s, &e, 10);
e                1016 drivers/firmware/dmi_scan.c 	if (s == e || *e != '/' || !month || month > 12) {
e                1021 drivers/firmware/dmi_scan.c 	s = e + 1;
e                1022 drivers/firmware/dmi_scan.c 	day = simple_strtoul(s, &e, 10);
e                1023 drivers/firmware/dmi_scan.c 	if (s == y || s == e || *e != '/' || day > 31)
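
The dmi_scan fragment above parses a DMI "mm/dd/yy[yy]" date with simple_strtoul(), using the returned end pointer e both to detect empty fields (s == e) and to check the '/' separators, and widening a 2-digit year. A simplified left-to-right sketch (the kernel actually scans the year from the end of the string first, and its 2-digit-year window may differ from the one assumed here):

    #include <stdlib.h>

    /* strtoul() stands in for the kernel's simple_strtoul(). */
    static int parse_dmi_date(const char *s, int *y, int *m, int *d)
    {
            char *e;

            *m = (int)strtoul(s, &e, 10);
            if (s == e || *e != '/' || *m < 1 || *m > 12)
                    return -1;
            s = e + 1;
            *d = (int)strtoul(s, &e, 10);
            if (s == e || *e != '/' || *d > 31)
                    return -1;
            s = e + 1;
            *y = (int)strtoul(s, &e, 10);
            if (s == e)
                    return -1;
            if (*y < 100)                   /* assumed 2-digit-year window */
                    *y += (*y >= 80) ? 1900 : 2000;
            return 0;
    }
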
e                  29 drivers/firmware/qcom_scm-64.c #define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
e                  34 drivers/firmware/qcom_scm-64.c 			   (((e) & 0x3) << 12) | \
e                 405 drivers/firmware/qemu_fw_cfg.c static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
e                 407 drivers/firmware/qemu_fw_cfg.c 	return sprintf(buf, "%u\n", e->size);
e                 410 drivers/firmware/qemu_fw_cfg.c static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
e                 412 drivers/firmware/qemu_fw_cfg.c 	return sprintf(buf, "%u\n", e->select);
e                 415 drivers/firmware/qemu_fw_cfg.c static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
e                 417 drivers/firmware/qemu_fw_cfg.c 	return sprintf(buf, "%s\n", e->name);
e                  51 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 	struct amdgpu_bo_list_entry *e;
e                  53 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 	amdgpu_bo_list_for_each_entry(e, list) {
e                  54 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 192 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 	struct amdgpu_bo_list_entry *e;
e                 203 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 	amdgpu_bo_list_for_each_entry(e, list) {
e                 204 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 205 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		unsigned priority = e->priority;
e                 208 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 			list_add_tail(&e->tv.head, &bucket[priority]);
e                 210 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		e->user_pages = NULL;
e                  74 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h #define amdgpu_bo_list_for_each_entry(e, list) \
e                  75 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	for (e = amdgpu_bo_list_array_entry(list, 0); \
e                  76 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	     e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
e                  77 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	     ++e)
e                  79 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h #define amdgpu_bo_list_for_each_userptr_entry(e, list) \
e                  80 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
e                  81 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	     e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
e                  82 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h 	     ++e)
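
The two amdgpu_bo_list.h macros above are plain array-range iterators: start at entry 0 (or at first_userptr) and advance a pointer until it reaches the one-past-the-end entry computed from num_entries. A compilable sketch of the same shape with a hypothetical entry type:

    #include <stdio.h>

    struct entry { int priority; };

    struct bo_list {
            struct entry *entries;
            unsigned int first_userptr, num_entries;
    };

    #define entry_at(l, i) (&(l)->entries[(i)])

    /* Same shape as amdgpu_bo_list_for_each_entry(). */
    #define for_each_entry(e, l) \
            for ((e) = entry_at(l, 0); \
                 (e) != entry_at(l, (l)->num_entries); \
                 ++(e))

    int main(void)
    {
            struct entry arr[] = { {1}, {2}, {3} };
            struct bo_list l = { arr, 1, 3 };
            struct entry *e;

            for_each_entry(e, &l)
                    printf("priority %d\n", e->priority);
            return 0;
    }
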
e                 575 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_bo_list_entry *e;
e                 602 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
e                 603 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		e->tv.num_shared = 2;
e                 619 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
e                 620 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 624 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
e                 627 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		if (!e->user_pages) {
e                 632 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
e                 634 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			kvfree(e->user_pages);
e                 635 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			e->user_pages = NULL;
e                 640 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
e                 645 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		e->user_invalidated = userpage_invalidated;
e                 686 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
e                 687 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 691 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			e->tv.num_shared = 0;
e                 692 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
e                 724 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_bo_list_entry *e;
e                 727 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	list_for_each_entry(e, &p->validated, tv.head) {
e                 728 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 790 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_bo_list_entry *e;
e                 888 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
e                 892 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		bo = ttm_to_amdgpu_bo(e->tv.bo);
e                 896 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		bo_va = e->bo_va;
e                 926 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
e                 927 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e                1278 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_bo_list_entry *e;
e                1299 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
e                1300 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
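
The amdgpu_cs.c fragments above are the userptr re-validation step: per entry, allocate a page array, look up the current user pages, and compare them against what the TTM object already holds. A condensed sketch of that flow; the kvmalloc_array element size and flags are assumptions (the listing truncates those arguments), and the real code unwinds through error labels rather than returning directly:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i, r;

		/* One struct page pointer per TTM page (assumed args). */
		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL);
		if (!e->user_pages)
			return -ENOMEM;		/* real code: goto unwind */

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;		/* real code: goto unwind */
		}

		/* Any page that moved invalidates the mapping. */
		for (i = 0; i < bo->tbo.ttm->num_pages; i++)
			if (bo->tbo.ttm->pages[i] != e->user_pages[i])
				userpage_invalidated = true;

		e->user_invalidated = userpage_invalidated;
	}
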
e                  30 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c #define to_amdgpu_ctx_entity(e)	\
e                  31 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 	container_of((e), struct amdgpu_ctx_entity, entity)
e                  27 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
e                  28 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
e                  35 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
e                 274 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h #define amdgpu_dpm_enable_bapm(adev, e) \
e                 275 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h 		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
e                 134 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 136 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_possible(sync->fences, e, node, f->context) {
e                 137 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		if (unlikely(e->fence->context != f->context))
e                 140 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		amdgpu_sync_keep_later(&e->fence, f);
e                 143 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		e->explicit |= explicit;
e                 160 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 171 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
e                 172 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	if (!e)
e                 175 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	e->explicit = explicit;
e                 177 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_add(sync->fences, &e->node, f->context);
e                 178 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	e->fence = dma_fence_get(f);
e                 261 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 265 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
e                 266 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		struct dma_fence *f = e->fence;
e                 270 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			hash_del(&e->node);
e                 272 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			kmem_cache_free(amdgpu_sync_slab, e);
e                 303 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 307 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
e                 309 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		f = e->fence;
e                 311 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			*explicit = e->explicit;
e                 313 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		hash_del(&e->node);
e                 314 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		kmem_cache_free(amdgpu_sync_slab, e);
e                 335 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 340 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_safe(source->fences, i, tmp, e, node) {
e                 341 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		f = e->fence;
e                 343 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
e                 347 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			hash_del(&e->node);
e                 349 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			kmem_cache_free(amdgpu_sync_slab, e);
e                 361 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 365 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
e                 366 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		r = dma_fence_wait(e->fence, intr);
e                 370 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		hash_del(&e->node);
e                 371 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		dma_fence_put(e->fence);
e                 372 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		kmem_cache_free(amdgpu_sync_slab, e);
e                 387 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	struct amdgpu_sync_entry *e;
e                 391 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
e                 392 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		hash_del(&e->node);
e                 393 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		dma_fence_put(e->fence);
e                 394 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		kmem_cache_free(amdgpu_sync_slab, e);
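
amdgpu_sync.c above keeps one entry per fence context in a linux/hashtable.h table and tears it down with hash_for_each_safe. A self-contained sketch of the same add/lookup/teardown lifecycle using the generic hashtable API; the entry layout is simplified and is not the driver's struct:

	#include <linux/hashtable.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct entry {
		struct hlist_node node;
		u64 key;
	};

	static DEFINE_HASHTABLE(table, 4);	/* 2^4 buckets */

	static int demo_add(u64 key)
	{
		struct entry *e;

		/* Deduplicate: bucket scan, then exact-key check. */
		hash_for_each_possible(table, e, node, key)
			if (e->key == key)
				return 0;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;
		e->key = key;
		hash_add(table, &e->node, key);
		return 0;
	}

	static void demo_free_all(void)
	{
		struct hlist_node *tmp;
		struct entry *e;
		int bkt;

		/* _safe variant: entries may be deleted while iterating. */
		hash_for_each_safe(table, bkt, tmp, e, node) {
			hash_del(&e->node);
			kfree(e);
		}
	}
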
e                 490 drivers/gpu/drm/amd/amdgpu/cikd.h #define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
e                 343 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c #define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
e                 348 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
e                 272 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 	struct drm_pending_vblank_event *e;
e                 299 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 	e = amdgpu_crtc->event;
e                 302 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 	if (!e)
e                 322 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 		if (e) {
e                 323 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
e                 328 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 	} else if (e) {
e                 343 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e                 344 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 		e->pipe = amdgpu_crtc->crtc_id;
e                 346 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
e                 347 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 		e = NULL;
e                 363 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 			 vrr_active, (int) !e);
e                  63 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h static inline double dml_max5(double a, double b, double c, double d, double e)
e                  65 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h 	return dml_max(dml_max4(a, b, c, d), e);
e                 391 drivers/gpu/drm/bridge/cdns-dsi.c #define STS_CTL_EDGE(e)			((e) << 16)
e                 898 drivers/gpu/drm/drm_atomic_uapi.c 	struct drm_pending_vblank_event *e = NULL;
e                 900 drivers/gpu/drm/drm_atomic_uapi.c 	e = kzalloc(sizeof *e, GFP_KERNEL);
e                 901 drivers/gpu/drm/drm_atomic_uapi.c 	if (!e)
e                 904 drivers/gpu/drm/drm_atomic_uapi.c 	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e                 905 drivers/gpu/drm/drm_atomic_uapi.c 	e->event.base.length = sizeof(e->event);
e                 906 drivers/gpu/drm/drm_atomic_uapi.c 	e->event.vbl.crtc_id = crtc->base.id;
e                 907 drivers/gpu/drm/drm_atomic_uapi.c 	e->event.vbl.user_data = user_data;
e                 909 drivers/gpu/drm/drm_atomic_uapi.c 	return e;
e                1125 drivers/gpu/drm/drm_atomic_uapi.c 			struct drm_pending_vblank_event *e;
e                1127 drivers/gpu/drm/drm_atomic_uapi.c 			e = create_vblank_event(crtc, arg->user_data);
e                1128 drivers/gpu/drm/drm_atomic_uapi.c 			if (!e)
e                1131 drivers/gpu/drm/drm_atomic_uapi.c 			crtc_state->event = e;
e                1135 drivers/gpu/drm/drm_atomic_uapi.c 			struct drm_pending_vblank_event *e = crtc_state->event;
e                1140 drivers/gpu/drm/drm_atomic_uapi.c 			ret = drm_event_reserve_init(dev, file_priv, &e->base,
e                1141 drivers/gpu/drm/drm_atomic_uapi.c 						     &e->event.base);
e                1143 drivers/gpu/drm/drm_atomic_uapi.c 				kfree(e);
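
create_vblank_event and its caller above show the canonical two-step DRM event setup: allocate and fill the payload, then charge it against the file's event space with drm_event_reserve_init, freeing on failure. A condensed sketch of that pairing, assuming crtc, user_data, dev and file_priv from the surrounding context:

	struct drm_pending_vblank_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.vbl.crtc_id = crtc->base.id;
	e->event.vbl.user_data = user_data;

	/* Reserve queue space; on failure the caller still owns
	 * the memory and must free it. */
	ret = drm_event_reserve_init(dev, file_priv, &e->base,
				     &e->event.base);
	if (ret) {
		kfree(e);
		return ret;
	}
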
e                 175 drivers/gpu/drm/drm_file.c 	struct drm_pending_event *e, *et;
e                 181 drivers/gpu/drm/drm_file.c 	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
e                 183 drivers/gpu/drm/drm_file.c 		list_del(&e->pending_link);
e                 184 drivers/gpu/drm/drm_file.c 		e->file_priv = NULL;
e                 188 drivers/gpu/drm/drm_file.c 	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
e                 189 drivers/gpu/drm/drm_file.c 		list_del(&e->link);
e                 190 drivers/gpu/drm/drm_file.c 		kfree(e);
e                 497 drivers/gpu/drm/drm_file.c 		struct drm_pending_event *e = NULL;
e                 501 drivers/gpu/drm/drm_file.c 			e = list_first_entry(&file_priv->event_list,
e                 503 drivers/gpu/drm/drm_file.c 			file_priv->event_space += e->event->length;
e                 504 drivers/gpu/drm/drm_file.c 			list_del(&e->link);
e                 508 drivers/gpu/drm/drm_file.c 		if (e == NULL) {
e                 525 drivers/gpu/drm/drm_file.c 			unsigned length = e->event->length;
e                 531 drivers/gpu/drm/drm_file.c 				list_add(&e->link, &file_priv->event_list);
e                 537 drivers/gpu/drm/drm_file.c 			if (copy_to_user(buffer + ret, e->event, length)) {
e                 544 drivers/gpu/drm/drm_file.c 			kfree(e);
e                 610 drivers/gpu/drm/drm_file.c 				  struct drm_event *e)
e                 612 drivers/gpu/drm/drm_file.c 	if (file_priv->event_space < e->length)
e                 615 drivers/gpu/drm/drm_file.c 	file_priv->event_space -= e->length;
e                 617 drivers/gpu/drm/drm_file.c 	p->event = e;
e                 652 drivers/gpu/drm/drm_file.c 			   struct drm_event *e)
e                 658 drivers/gpu/drm/drm_file.c 	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
e                 706 drivers/gpu/drm/drm_file.c void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
e                 710 drivers/gpu/drm/drm_file.c 	if (e->completion) {
e                 711 drivers/gpu/drm/drm_file.c 		complete_all(e->completion);
e                 712 drivers/gpu/drm/drm_file.c 		e->completion_release(e->completion);
e                 713 drivers/gpu/drm/drm_file.c 		e->completion = NULL;
e                 716 drivers/gpu/drm/drm_file.c 	if (e->fence) {
e                 717 drivers/gpu/drm/drm_file.c 		dma_fence_signal(e->fence);
e                 718 drivers/gpu/drm/drm_file.c 		dma_fence_put(e->fence);
e                 721 drivers/gpu/drm/drm_file.c 	if (!e->file_priv) {
e                 722 drivers/gpu/drm/drm_file.c 		kfree(e);
e                 726 drivers/gpu/drm/drm_file.c 	list_del(&e->pending_link);
e                 727 drivers/gpu/drm/drm_file.c 	list_add_tail(&e->link,
e                 728 drivers/gpu/drm/drm_file.c 		      &e->file_priv->event_list);
e                 729 drivers/gpu/drm/drm_file.c 	wake_up_interruptible(&e->file_priv->event_wait);
e                 748 drivers/gpu/drm/drm_file.c void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
e                 753 drivers/gpu/drm/drm_file.c 	drm_send_event_locked(dev, e);
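
drm_send_event_locked above completes the lifecycle: it signals any completion or fence attached to the pending event, then either frees it (reader already gone, file_priv == NULL) or moves it from the pending list to the file's event list and wakes the reader; drm_send_event is the lock-taking wrapper. A short continuation of the previous sketch:

	/* When the flip actually completes, hand the reserved
	 * event to the reader side. */
	drm_send_event(dev, &e->base);		/* takes dev->event_lock */

	/* Or, if already holding dev->event_lock: */
	drm_send_event_locked(dev, &e->base);
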
e                1041 drivers/gpu/drm/drm_plane.c 	struct drm_pending_vblank_event *e = NULL;
e                1163 drivers/gpu/drm/drm_plane.c 		e = kzalloc(sizeof *e, GFP_KERNEL);
e                1164 drivers/gpu/drm/drm_plane.c 		if (!e) {
e                1169 drivers/gpu/drm/drm_plane.c 		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e                1170 drivers/gpu/drm/drm_plane.c 		e->event.base.length = sizeof(e->event);
e                1171 drivers/gpu/drm/drm_plane.c 		e->event.vbl.user_data = page_flip->user_data;
e                1172 drivers/gpu/drm/drm_plane.c 		e->event.vbl.crtc_id = crtc->base.id;
e                1174 drivers/gpu/drm/drm_plane.c 		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
e                1176 drivers/gpu/drm/drm_plane.c 			kfree(e);
e                1177 drivers/gpu/drm/drm_plane.c 			e = NULL;
e                1184 drivers/gpu/drm/drm_plane.c 		ret = crtc->funcs->page_flip_target(crtc, fb, e,
e                1189 drivers/gpu/drm/drm_plane.c 		ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags,
e                1193 drivers/gpu/drm/drm_plane.c 			drm_event_cancel_free(dev, &e->base);
e                 830 drivers/gpu/drm/drm_vblank.c 		struct drm_pending_vblank_event *e,
e                 835 drivers/gpu/drm/drm_vblank.c 	switch (e->event.base.type) {
e                 839 drivers/gpu/drm/drm_vblank.c 		e->event.vbl.sequence = seq;
e                 845 drivers/gpu/drm/drm_vblank.c 		e->event.vbl.tv_sec = tv.tv_sec;
e                 846 drivers/gpu/drm/drm_vblank.c 		e->event.vbl.tv_usec = tv.tv_nsec / 1000;
e                 850 drivers/gpu/drm/drm_vblank.c 			e->event.seq.sequence = seq;
e                 851 drivers/gpu/drm/drm_vblank.c 		e->event.seq.time_ns = ktime_to_ns(now);
e                 854 drivers/gpu/drm/drm_vblank.c 	trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
e                 855 drivers/gpu/drm/drm_vblank.c 	drm_send_event_locked(dev, &e->base);
e                 897 drivers/gpu/drm/drm_vblank.c 			       struct drm_pending_vblank_event *e)
e                 904 drivers/gpu/drm/drm_vblank.c 	e->pipe = pipe;
e                 905 drivers/gpu/drm/drm_vblank.c 	e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
e                 906 drivers/gpu/drm/drm_vblank.c 	list_add_tail(&e->base.link, &dev->vblank_event_list);
e                 922 drivers/gpu/drm/drm_vblank.c 				struct drm_pending_vblank_event *e)
e                 936 drivers/gpu/drm/drm_vblank.c 	e->pipe = pipe;
e                 937 drivers/gpu/drm/drm_vblank.c 	send_vblank_event(dev, e, seq, now);
e                1137 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e, *t;
e                1172 drivers/gpu/drm/drm_vblank.c 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
e                1173 drivers/gpu/drm/drm_vblank.c 		if (e->pipe != pipe)
e                1177 drivers/gpu/drm/drm_vblank.c 			  e->sequence, seq);
e                1178 drivers/gpu/drm/drm_vblank.c 		list_del(&e->base.link);
e                1180 drivers/gpu/drm/drm_vblank.c 		send_vblank_event(dev, e, seq, now);
e                1456 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e;
e                1462 drivers/gpu/drm/drm_vblank.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                1463 drivers/gpu/drm/drm_vblank.c 	if (e == NULL) {
e                1468 drivers/gpu/drm/drm_vblank.c 	e->pipe = pipe;
e                1469 drivers/gpu/drm/drm_vblank.c 	e->event.base.type = DRM_EVENT_VBLANK;
e                1470 drivers/gpu/drm/drm_vblank.c 	e->event.base.length = sizeof(e->event.vbl);
e                1471 drivers/gpu/drm/drm_vblank.c 	e->event.vbl.user_data = vblwait->request.signal;
e                1472 drivers/gpu/drm/drm_vblank.c 	e->event.vbl.crtc_id = 0;
e                1476 drivers/gpu/drm/drm_vblank.c 			e->event.vbl.crtc_id = crtc->base.id;
e                1492 drivers/gpu/drm/drm_vblank.c 	ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
e                1493 drivers/gpu/drm/drm_vblank.c 					    &e->event.base);
e                1505 drivers/gpu/drm/drm_vblank.c 	e->sequence = req_seq;
e                1508 drivers/gpu/drm/drm_vblank.c 		send_vblank_event(dev, e, seq, now);
e                1512 drivers/gpu/drm/drm_vblank.c 		list_add_tail(&e->base.link, &dev->vblank_event_list);
e                1522 drivers/gpu/drm/drm_vblank.c 	kfree(e);
e                1712 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e, *t;
e                1720 drivers/gpu/drm/drm_vblank.c 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
e                1721 drivers/gpu/drm/drm_vblank.c 		if (e->pipe != pipe)
e                1723 drivers/gpu/drm/drm_vblank.c 		if (!vblank_passed(seq, e->sequence))
e                1727 drivers/gpu/drm/drm_vblank.c 			  e->sequence, seq);
e                1729 drivers/gpu/drm/drm_vblank.c 		list_del(&e->base.link);
e                1731 drivers/gpu/drm/drm_vblank.c 		send_vblank_event(dev, e, seq, now);
e                1888 drivers/gpu/drm/drm_vblank.c 	struct drm_pending_vblank_event *e;
e                1915 drivers/gpu/drm/drm_vblank.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                1916 drivers/gpu/drm/drm_vblank.c 	if (e == NULL)
e                1934 drivers/gpu/drm/drm_vblank.c 	e->pipe = pipe;
e                1935 drivers/gpu/drm/drm_vblank.c 	e->event.base.type = DRM_EVENT_CRTC_SEQUENCE;
e                1936 drivers/gpu/drm/drm_vblank.c 	e->event.base.length = sizeof(e->event.seq);
e                1937 drivers/gpu/drm/drm_vblank.c 	e->event.seq.user_data = queue_seq->user_data;
e                1952 drivers/gpu/drm/drm_vblank.c 	ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
e                1953 drivers/gpu/drm/drm_vblank.c 					    &e->event.base);
e                1958 drivers/gpu/drm/drm_vblank.c 	e->sequence = req_seq;
e                1962 drivers/gpu/drm/drm_vblank.c 		send_vblank_event(dev, e, seq, now);
e                1966 drivers/gpu/drm/drm_vblank.c 		list_add_tail(&e->base.link, &dev->vblank_event_list);
e                1977 drivers/gpu/drm/drm_vblank.c 	kfree(e);
e                 395 drivers/gpu/drm/exynos/exynos5433_drm_decon.c #define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s))
e                  36 drivers/gpu/drm/exynos/exynos_drm_dpi.c static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
e                  38 drivers/gpu/drm/exynos/exynos_drm_dpi.c 	return container_of(e, struct exynos_dpi, encoder);
e                 288 drivers/gpu/drm/exynos/exynos_drm_dsi.c static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
e                 290 drivers/gpu/drm/exynos/exynos_drm_dsi.c 	return container_of(e, struct exynos_dsi, encoder);
e                 913 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	struct drm_exynos_pending_g2d_event *e;
e                 919 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	e = list_first_entry(&runqueue_node->event_list,
e                 923 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	e->event.tv_sec = now.tv_sec;
e                 924 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
e                 925 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	e->event.cmdlist_no = cmdlist_no;
e                 927 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	drm_send_event(drm_dev, &e->base);
e                1144 drivers/gpu/drm/exynos/exynos_drm_g2d.c 	struct drm_exynos_pending_g2d_event *e;
e                1168 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
e                1169 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		if (!e) {
e                1174 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		e->event.base.type = DRM_EXYNOS_G2D_EVENT;
e                1175 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		e->event.base.length = sizeof(e->event);
e                1176 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		e->event.user_data = req->user_data;
e                1178 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
e                1180 drivers/gpu/drm/exynos/exynos_drm_g2d.c 			kfree(e);
e                1184 drivers/gpu/drm/exynos/exynos_drm_g2d.c 		node->event = e;
e                 697 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	struct drm_pending_exynos_ipp_event *e = NULL;
e                 700 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                 701 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	if (!e)
e                 704 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
e                 705 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	e->event.base.length = sizeof(e->event);
e                 706 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	e->event.user_data = user_data;
e                 708 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
e                 709 drivers/gpu/drm/exynos/exynos_drm_ipp.c 				     &e->event.base);
e                 713 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	task->event = e;
e                 716 drivers/gpu/drm/exynos/exynos_drm_ipp.c 	kfree(e);
e                  50 drivers/gpu/drm/exynos/exynos_drm_vidi.c static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
e                  52 drivers/gpu/drm/exynos/exynos_drm_vidi.c 	return container_of(e, struct vidi_context, encoder);
e                 146 drivers/gpu/drm/exynos/exynos_hdmi.c static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
e                 148 drivers/gpu/drm/exynos/exynos_hdmi.c 	return container_of(e, struct hdmi_context, encoder);
e                17294 drivers/gpu/drm/i915/display/intel_display.c #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
e                 543 drivers/gpu/drm/i915/display/intel_display.h void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
e                  26 drivers/gpu/drm/i915/display/intel_overlay.h void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
e                 261 drivers/gpu/drm/i915/gem/i915_gem_context.c static void __free_engines(struct i915_gem_engines *e, unsigned int count)
e                 264 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (!e->engines[count])
e                 267 drivers/gpu/drm/i915/gem/i915_gem_context.c 		intel_context_put(e->engines[count]);
e                 269 drivers/gpu/drm/i915/gem/i915_gem_context.c 	kfree(e);
e                 272 drivers/gpu/drm/i915/gem/i915_gem_context.c static void free_engines(struct i915_gem_engines *e)
e                 274 drivers/gpu/drm/i915/gem/i915_gem_context.c 	__free_engines(e, e->num_engines);
e                 286 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines *e;
e                 289 drivers/gpu/drm/i915/gem/i915_gem_context.c 	e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
e                 290 drivers/gpu/drm/i915/gem/i915_gem_context.c 	if (!e)
e                 293 drivers/gpu/drm/i915/gem/i915_gem_context.c 	init_rcu_head(&e->rcu);
e                 299 drivers/gpu/drm/i915/gem/i915_gem_context.c 			__free_engines(e, id);
e                 303 drivers/gpu/drm/i915/gem/i915_gem_context.c 		e->engines[id] = ce;
e                 304 drivers/gpu/drm/i915/gem/i915_gem_context.c 		e->num_engines = id + 1;
e                 307 drivers/gpu/drm/i915/gem/i915_gem_context.c 	return e;
e                 410 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines *e;
e                 425 drivers/gpu/drm/i915/gem/i915_gem_context.c 	e = default_engines(ctx);
e                 426 drivers/gpu/drm/i915/gem/i915_gem_context.c 	if (IS_ERR(e)) {
e                 427 drivers/gpu/drm/i915/gem/i915_gem_context.c 		err = PTR_ERR(e);
e                 430 drivers/gpu/drm/i915/gem/i915_gem_context.c 	RCU_INIT_POINTER(ctx->engines, e);
e                1646 drivers/gpu/drm/i915/gem/i915_gem_context.c __copy_engines(struct i915_gem_engines *e)
e                1651 drivers/gpu/drm/i915/gem/i915_gem_context.c 	copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
e                1656 drivers/gpu/drm/i915/gem/i915_gem_context.c 	for (n = 0; n < e->num_engines; n++) {
e                1657 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (e->engines[n])
e                1658 drivers/gpu/drm/i915/gem/i915_gem_context.c 			copy->engines[n] = intel_context_get(e->engines[n]);
e                1672 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines *e;
e                1680 drivers/gpu/drm/i915/gem/i915_gem_context.c 	e = NULL;
e                1682 drivers/gpu/drm/i915/gem/i915_gem_context.c 		e = __copy_engines(i915_gem_context_engines(ctx));
e                1684 drivers/gpu/drm/i915/gem/i915_gem_context.c 	if (IS_ERR_OR_NULL(e)) {
e                1686 drivers/gpu/drm/i915/gem/i915_gem_context.c 		return PTR_ERR_OR_ZERO(e);
e                1689 drivers/gpu/drm/i915/gem/i915_gem_context.c 	count = e->num_engines;
e                1728 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (e->engines[n]) {
e                1729 drivers/gpu/drm/i915/gem/i915_gem_context.c 			ci.engine_class = e->engines[n]->engine->uabi_class;
e                1730 drivers/gpu/drm/i915/gem/i915_gem_context.c 			ci.engine_instance = e->engines[n]->engine->uabi_instance;
e                1742 drivers/gpu/drm/i915/gem/i915_gem_context.c 	free_engines(e);
e                1854 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
e                1859 drivers/gpu/drm/i915/gem/i915_gem_context.c 	clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
e                1864 drivers/gpu/drm/i915/gem/i915_gem_context.c 	for (n = 0; n < e->num_engines; n++) {
e                1867 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (!e->engines[n]) {
e                1871 drivers/gpu/drm/i915/gem/i915_gem_context.c 		engine = e->engines[n]->engine;
e                1927 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
e                1933 drivers/gpu/drm/i915/gem/i915_gem_context.c 	if (e->num_engines != clone->num_engines) {
e                1938 drivers/gpu/drm/i915/gem/i915_gem_context.c 	for (n = 0; n < e->num_engines; n++) {
e                1939 drivers/gpu/drm/i915/gem/i915_gem_context.c 		struct intel_context *ce = e->engines[n];
e                2370 drivers/gpu/drm/i915/gem/i915_gem_context.c 	const struct i915_gem_engines *e = it->engines;
e                2374 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (it->idx >= e->num_engines)
e                2377 drivers/gpu/drm/i915/gem/i915_gem_context.c 		ctx = e->engines[it->idx++];
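
default_engines and __copy_engines above size their flexible-array allocations with struct_size(), which multiplies element size by count with overflow checking. A minimal sketch of the idiom; the struct layout is abbreviated, not i915's real definition:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct engines {
		unsigned int num_engines;
		void *engines[];	/* flexible array member */
	};

	static struct engines *alloc_engines(unsigned int count)
	{
		/* struct_size(e, engines, count) ==
		 * sizeof(*e) + count * sizeof(e->engines[0]),
		 * saturating instead of wrapping on overflow; only
		 * the type of 'e' is used, so this is safe in its
		 * own initializer. */
		struct engines *e = kzalloc(struct_size(e, engines, count),
					    GFP_KERNEL);
		if (e)
			e->num_engines = count;
		return e;
	}
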
e                 204 drivers/gpu/drm/i915/gem/i915_gem_context.h 		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
e                 205 drivers/gpu/drm/i915/gem/i915_gem_context.h 		if (likely(idx < e->num_engines && e->engines[idx]))
e                 206 drivers/gpu/drm/i915/gem/i915_gem_context.h 			ce = intel_context_get(e->engines[idx]);
e                  15 drivers/gpu/drm/i915/gem/selftests/mock_context.c 	struct i915_gem_engines *e;
e                  27 drivers/gpu/drm/i915/gem/selftests/mock_context.c 	e = default_engines(ctx);
e                  28 drivers/gpu/drm/i915/gem/selftests/mock_context.c 	if (IS_ERR(e))
e                  30 drivers/gpu/drm/i915/gem/selftests/mock_context.c 	RCU_INIT_POINTER(ctx->engines, e);
e                 217 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 				      int e)
e                 219 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	bool user = e == -EINVAL;
e                 221 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	if (i915_inject_load_error(i915, e)) {
e                 225 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	} else if (i915_inject_load_error(i915, e)) {
e                 230 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	} else if (i915_inject_load_error(i915, e)) {
e                 234 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	} else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
e                 239 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	} else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
e                 243 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c 	} else if (user && i915_inject_load_error(i915, e)) {
e                 650 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct cmd_entry *e;
e                 652 drivers/gpu/drm/i915/gvt/cmd_parser.c 	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
e                 653 drivers/gpu/drm/i915/gvt/cmd_parser.c 		if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
e                 654 drivers/gpu/drm/i915/gvt/cmd_parser.c 			return e->info;
e                2648 drivers/gpu/drm/i915/gvt/cmd_parser.c static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
e                2650 drivers/gpu/drm/i915/gvt/cmd_parser.c 	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
e                3047 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct cmd_entry *e;
e                3057 drivers/gpu/drm/i915/gvt/cmd_parser.c 		e = kzalloc(sizeof(*e), GFP_KERNEL);
e                3058 drivers/gpu/drm/i915/gvt/cmd_parser.c 		if (!e)
e                3061 drivers/gpu/drm/i915/gvt/cmd_parser.c 		e->info = &cmd_info[i];
e                3063 drivers/gpu/drm/i915/gvt/cmd_parser.c 				e->info->opcode, e->info->rings);
e                3065 drivers/gpu/drm/i915/gvt/cmd_parser.c 			gvt_err("%s %s duplicated\n", e->info->name,
e                3067 drivers/gpu/drm/i915/gvt/cmd_parser.c 			kfree(e);
e                3073 drivers/gpu/drm/i915/gvt/cmd_parser.c 		INIT_HLIST_NODE(&e->hlist);
e                3074 drivers/gpu/drm/i915/gvt/cmd_parser.c 		add_cmd_entry(gvt, e);
e                3076 drivers/gpu/drm/i915/gvt/cmd_parser.c 				e->info->name, e->info->opcode, e->info->flag,
e                3077 drivers/gpu/drm/i915/gvt/cmd_parser.c 				e->info->devices, e->info->rings);
e                3085 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct cmd_entry *e;
e                3088 drivers/gpu/drm/i915/gvt/cmd_parser.c 	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
e                3089 drivers/gpu/drm/i915/gvt/cmd_parser.c 		kfree(e);
e                 147 drivers/gpu/drm/i915/gvt/gtt.c #define gtt_init_entry(e, t, p, v) do { \
e                 148 drivers/gpu/drm/i915/gvt/gtt.c 	(e)->type = t; \
e                 149 drivers/gpu/drm/i915/gvt/gtt.c 	(e)->pdev = p; \
e                 150 drivers/gpu/drm/i915/gvt/gtt.c 	memcpy(&(e)->val64, &v, sizeof(v)); \
e                 301 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e,
e                 314 drivers/gpu/drm/i915/gvt/gtt.c 				&e->val64, 8);
e                 318 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
e                 320 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 = *((u64 *)pt + index);
e                 326 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e,
e                 339 drivers/gpu/drm/i915/gvt/gtt.c 				&e->val64, 8);
e                 343 drivers/gpu/drm/i915/gvt/gtt.c 		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
e                 345 drivers/gpu/drm/i915/gvt/gtt.c 		*((u64 *)pt + index) = e->val64;
e                 362 drivers/gpu/drm/i915/gvt/gtt.c static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
e                 366 drivers/gpu/drm/i915/gvt/gtt.c 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
e                 367 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
e                 368 drivers/gpu/drm/i915/gvt/gtt.c 	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
e                 369 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
e                 370 drivers/gpu/drm/i915/gvt/gtt.c 	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
e                 371 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
e                 373 drivers/gpu/drm/i915/gvt/gtt.c 		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
e                 377 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
e                 379 drivers/gpu/drm/i915/gvt/gtt.c 	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
e                 380 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 &= ~ADDR_1G_MASK;
e                 382 drivers/gpu/drm/i915/gvt/gtt.c 	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e                 383 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 &= ~ADDR_2M_MASK;
e                 385 drivers/gpu/drm/i915/gvt/gtt.c 	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
e                 386 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 &= ~ADDR_64K_MASK;
e                 389 drivers/gpu/drm/i915/gvt/gtt.c 		e->val64 &= ~ADDR_4K_MASK;
e                 393 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 |= (pfn << PAGE_SHIFT);
e                 396 drivers/gpu/drm/i915/gvt/gtt.c static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
e                 398 drivers/gpu/drm/i915/gvt/gtt.c 	return !!(e->val64 & _PAGE_PSE);
e                 401 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
e                 403 drivers/gpu/drm/i915/gvt/gtt.c 	if (gen8_gtt_test_pse(e)) {
e                 404 drivers/gpu/drm/i915/gvt/gtt.c 		switch (e->type) {
e                 406 drivers/gpu/drm/i915/gvt/gtt.c 			e->val64 &= ~_PAGE_PSE;
e                 407 drivers/gpu/drm/i915/gvt/gtt.c 			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
e                 410 drivers/gpu/drm/i915/gvt/gtt.c 			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
e                 411 drivers/gpu/drm/i915/gvt/gtt.c 			e->val64 &= ~_PAGE_PSE;
e                 419 drivers/gpu/drm/i915/gvt/gtt.c static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
e                 421 drivers/gpu/drm/i915/gvt/gtt.c 	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
e                 424 drivers/gpu/drm/i915/gvt/gtt.c 	return !!(e->val64 & GEN8_PDE_IPS_64K);
e                 427 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
e                 429 drivers/gpu/drm/i915/gvt/gtt.c 	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
e                 432 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 &= ~GEN8_PDE_IPS_64K;
e                 435 drivers/gpu/drm/i915/gvt/gtt.c static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
e                 442 drivers/gpu/drm/i915/gvt/gtt.c 	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
e                 443 drivers/gpu/drm/i915/gvt/gtt.c 			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
e                 444 drivers/gpu/drm/i915/gvt/gtt.c 		return (e->val64 != 0);
e                 446 drivers/gpu/drm/i915/gvt/gtt.c 		return (e->val64 & _PAGE_PRESENT);
e                 449 drivers/gpu/drm/i915/gvt/gtt.c static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
e                 451 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 &= ~_PAGE_PRESENT;
e                 454 drivers/gpu/drm/i915/gvt/gtt.c static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
e                 456 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 |= _PAGE_PRESENT;
e                 459 drivers/gpu/drm/i915/gvt/gtt.c static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
e                 461 drivers/gpu/drm/i915/gvt/gtt.c 	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
e                 464 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
e                 466 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
e                 469 drivers/gpu/drm/i915/gvt/gtt.c static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
e                 471 drivers/gpu/drm/i915/gvt/gtt.c 	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
e                 648 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e, unsigned long index,
e                 655 drivers/gpu/drm/i915/gvt/gtt.c 	e->type = get_entry_type(type);
e                 657 drivers/gpu/drm/i915/gvt/gtt.c 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
e                 660 drivers/gpu/drm/i915/gvt/gtt.c 	ret = ops->get_entry(page_table, e, index, guest,
e                 666 drivers/gpu/drm/i915/gvt/gtt.c 	update_entry_type_for_real(ops, e, guest ?
e                 670 drivers/gpu/drm/i915/gvt/gtt.c 		    type, e->type, index, e->val64);
e                 677 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e, unsigned long index,
e                 683 drivers/gpu/drm/i915/gvt/gtt.c 	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
e                 687 drivers/gpu/drm/i915/gvt/gtt.c 		    type, e->type, index, e->val64);
e                 689 drivers/gpu/drm/i915/gvt/gtt.c 	return ops->set_entry(page_table, e, index, guest,
e                 694 drivers/gpu/drm/i915/gvt/gtt.c #define ppgtt_get_guest_entry(spt, e, index) \
e                 696 drivers/gpu/drm/i915/gvt/gtt.c 		spt->guest_page.type, e, index, true)
e                 698 drivers/gpu/drm/i915/gvt/gtt.c #define ppgtt_set_guest_entry(spt, e, index) \
e                 700 drivers/gpu/drm/i915/gvt/gtt.c 		spt->guest_page.type, e, index, true)
e                 702 drivers/gpu/drm/i915/gvt/gtt.c #define ppgtt_get_shadow_entry(spt, e, index) \
e                 704 drivers/gpu/drm/i915/gvt/gtt.c 		spt->shadow_page.type, e, index, false)
e                 706 drivers/gpu/drm/i915/gvt/gtt.c #define ppgtt_set_shadow_entry(spt, e, index) \
e                 708 drivers/gpu/drm/i915/gvt/gtt.c 		spt->shadow_page.type, e, index, false)
e                 905 drivers/gpu/drm/i915/gvt/gtt.c #define for_each_present_guest_entry(spt, e, i) \
e                 908 drivers/gpu/drm/i915/gvt/gtt.c 		if (!ppgtt_get_guest_entry(spt, e, i) && \
e                 909 drivers/gpu/drm/i915/gvt/gtt.c 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
e                 911 drivers/gpu/drm/i915/gvt/gtt.c #define for_each_present_shadow_entry(spt, e, i) \
e                 914 drivers/gpu/drm/i915/gvt/gtt.c 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
e                 915 drivers/gpu/drm/i915/gvt/gtt.c 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
e                 917 drivers/gpu/drm/i915/gvt/gtt.c #define for_each_shadow_entry(spt, e, i) \
e                 920 drivers/gpu/drm/i915/gvt/gtt.c 		if (!ppgtt_get_shadow_entry(spt, e, i))
e                 941 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e)
e                 947 drivers/gpu/drm/i915/gvt/gtt.c 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
e                 949 drivers/gpu/drm/i915/gvt/gtt.c 	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
e                 950 drivers/gpu/drm/i915/gvt/gtt.c 		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
e                 951 drivers/gpu/drm/i915/gvt/gtt.c 		cur_pt_type = get_next_pt_type(e->type);
e                 961 drivers/gpu/drm/i915/gvt/gtt.c 		if (ops->get_pfn(e) ==
e                 965 drivers/gpu/drm/i915/gvt/gtt.c 	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
e                 968 drivers/gpu/drm/i915/gvt/gtt.c 				ops->get_pfn(e));
e                 995 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry e;
e                1005 drivers/gpu/drm/i915/gvt/gtt.c 	for_each_present_shadow_entry(spt, &e, index) {
e                1006 drivers/gpu/drm/i915/gvt/gtt.c 		switch (e.type) {
e                1009 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_invalidate_pte(spt, &e);
e                1026 drivers/gpu/drm/i915/gvt/gtt.c 					spt->vgpu, &e);
e                1041 drivers/gpu/drm/i915/gvt/gtt.c 			spt, e.val64, e.type);
e                2040 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
e                2046 drivers/gpu/drm/i915/gvt/gtt.c 	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
e                2051 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_get_shadow_entry(s, e, index);
e                2053 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_get_guest_entry(s, e, index);
e                2076 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry e;
e                2087 drivers/gpu/drm/i915/gvt/gtt.c 		ggtt_get_guest_entry(mm, &e,
e                2090 drivers/gpu/drm/i915/gvt/gtt.c 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
e                2097 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_root_entry(mm, &e, 0);
e                2106 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_root_entry(mm, &e,
e                2119 drivers/gpu/drm/i915/gvt/gtt.c 			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
e                2124 drivers/gpu/drm/i915/gvt/gtt.c 			if (!pte_ops->test_present(&e)) {
e                2130 drivers/gpu/drm/i915/gvt/gtt.c 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
e                2149 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry e;
e                2162 drivers/gpu/drm/i915/gvt/gtt.c 	ggtt_get_guest_entry(ggtt_mm, &e, index);
e                2163 drivers/gpu/drm/i915/gvt/gtt.c 	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
e                2215 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
e                2231 drivers/gpu/drm/i915/gvt/gtt.c 	e.type = GTT_TYPE_GGTT_PTE;
e                2232 drivers/gpu/drm/i915/gvt/gtt.c 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
e                2251 drivers/gpu/drm/i915/gvt/gtt.c 					memcpy((void *)&e.val64 + last_off,
e                2262 drivers/gpu/drm/i915/gvt/gtt.c 				pos->data = e.val64;
e                2263 drivers/gpu/drm/i915/gvt/gtt.c 				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
e                2274 drivers/gpu/drm/i915/gvt/gtt.c 			partial_pte->data = e.val64;
e                2281 drivers/gpu/drm/i915/gvt/gtt.c 	if (!partial_update && (ops->test_present(&e))) {
e                2282 drivers/gpu/drm/i915/gvt/gtt.c 		gfn = ops->get_pfn(&e);
e                2283 drivers/gpu/drm/i915/gvt/gtt.c 		m.val64 = e.val64;
e                2284 drivers/gpu/drm/i915/gvt/gtt.c 		m.type = e.type;
e                2311 drivers/gpu/drm/i915/gvt/gtt.c 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
e                2313 drivers/gpu/drm/i915/gvt/gtt.c 	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
e                2314 drivers/gpu/drm/i915/gvt/gtt.c 	ggtt_invalidate_pte(vgpu, &e);
e                  50 drivers/gpu/drm/i915/gvt/gtt.h 			 struct intel_gvt_gtt_entry *e,
e                  56 drivers/gpu/drm/i915/gvt/gtt.h 			 struct intel_gvt_gtt_entry *e,
e                  61 drivers/gpu/drm/i915/gvt/gtt.h 	bool (*test_present)(struct intel_gvt_gtt_entry *e);
e                  62 drivers/gpu/drm/i915/gvt/gtt.h 	void (*clear_present)(struct intel_gvt_gtt_entry *e);
e                  63 drivers/gpu/drm/i915/gvt/gtt.h 	void (*set_present)(struct intel_gvt_gtt_entry *e);
e                  64 drivers/gpu/drm/i915/gvt/gtt.h 	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
e                  65 drivers/gpu/drm/i915/gvt/gtt.h 	void (*clear_pse)(struct intel_gvt_gtt_entry *e);
e                  66 drivers/gpu/drm/i915/gvt/gtt.h 	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
e                  67 drivers/gpu/drm/i915/gvt/gtt.h 	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
e                  68 drivers/gpu/drm/i915/gvt/gtt.h 	bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
e                  69 drivers/gpu/drm/i915/gvt/gtt.h 	void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
e                  70 drivers/gpu/drm/i915/gvt/gtt.h 	void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
e                  71 drivers/gpu/drm/i915/gvt/gtt.h 	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
e                  72 drivers/gpu/drm/i915/gvt/gtt.h 	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
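
gtt.h above declares the PTE entry accessors as a table of function pointers, so the gen-specific gen8_gtt_* helpers earlier in gtt.c sit behind one interface; callers reach them through vgpu->gvt->gtt.pte_ops. A minimal call-side sketch, reusing the entry initializer visible in the gtt.c fragments:

	struct intel_gvt_gtt_entry e = { .val64 = 0,
					 .type = GTT_TYPE_GGTT_PTE };

	/* Dispatch through the ops table rather than calling a
	 * gen8_gtt_* helper directly. */
	if (vgpu->gvt->gtt.pte_ops->test_present(&e)) {
		unsigned long pfn = vgpu->gvt->gtt.pte_ops->get_pfn(&e);
		/* ... translate or remap pfn ... */
	}
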
e                  87 drivers/gpu/drm/i915/gvt/handlers.c 	struct intel_gvt_mmio_info *e;
e                  89 drivers/gpu/drm/i915/gvt/handlers.c 	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
e                  90 drivers/gpu/drm/i915/gvt/handlers.c 		if (e->offset == offset)
e                  91 drivers/gpu/drm/i915/gvt/handlers.c 			return e;
e                3316 drivers/gpu/drm/i915/gvt/handlers.c 	struct intel_gvt_mmio_info *e;
e                3319 drivers/gpu/drm/i915/gvt/handlers.c 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
e                3320 drivers/gpu/drm/i915/gvt/handlers.c 		kfree(e);
e                3410 drivers/gpu/drm/i915/gvt/handlers.c 	struct intel_gvt_mmio_info *e;
e                3413 drivers/gpu/drm/i915/gvt/handlers.c 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
e                3414 drivers/gpu/drm/i915/gvt/handlers.c 		ret = handler(gvt, e->offset, data);
e                  45 drivers/gpu/drm/i915/gvt/interrupt.c #define get_event_virt_handler(irq, e)	(irq->events[e].v_handler)
e                  46 drivers/gpu/drm/i915/gvt/interrupt.c #define get_irq_info(irq, e)		(irq->events[e].info)
e                 496 drivers/gpu/drm/i915/gvt/interrupt.c #define SET_BIT_INFO(s, b, e, i)		\
e                 498 drivers/gpu/drm/i915/gvt/interrupt.c 		s->events[e].bit = b;		\
e                 499 drivers/gpu/drm/i915/gvt/interrupt.c 		s->events[e].info = s->info[i];	\
e                 500 drivers/gpu/drm/i915/gvt/interrupt.c 		s->info[i]->bit_to_event[b] = e;\
e                1845 drivers/gpu/drm/i915/i915_drv.h #define INTEL_GEN_MASK(s, e) ( \
e                1847 drivers/gpu/drm/i915/i915_drv.h 	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
e                1848 drivers/gpu/drm/i915/i915_drv.h 	GENMASK((e) - 1, (s) - 1))
e                1851 drivers/gpu/drm/i915/i915_drv.h #define IS_GEN_RANGE(dev_priv, s, e) \
e                1852 drivers/gpu/drm/i915/i915_drv.h 	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
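
INTEL_GEN_MASK above builds a bit range with GENMASK, with two BUILD_BUG_ON_ZERO guards forcing compile-time-constant bounds; IS_GEN_RANGE then tests the device's gen_mask against it. A worked expansion, assuming the macros exactly as listed:

	IS_GEN_RANGE(dev_priv, 9, 11)
	    -> gen_mask & INTEL_GEN_MASK(9, 11)
	    -> gen_mask & GENMASK(11 - 1, 9 - 1)
	    -> gen_mask & GENMASK(10, 8)	/* == 0x700 */

That is, bit N-1 encodes "gen N", so a gen 9, 10 or 11 device has one of bits 8..10 set in INTEL_INFO(dev_priv)->gen_mask.
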
e                  62 drivers/gpu/drm/i915/i915_gpu_error.c static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
e                  67 drivers/gpu/drm/i915/i915_gpu_error.c 	if (e->bytes + len + 1 <= e->size)
e                  70 drivers/gpu/drm/i915/i915_gpu_error.c 	if (e->bytes) {
e                  71 drivers/gpu/drm/i915/i915_gpu_error.c 		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
e                  72 drivers/gpu/drm/i915/i915_gpu_error.c 		e->iter += e->bytes;
e                  73 drivers/gpu/drm/i915/i915_gpu_error.c 		e->buf = NULL;
e                  74 drivers/gpu/drm/i915/i915_gpu_error.c 		e->bytes = 0;
e                  77 drivers/gpu/drm/i915/i915_gpu_error.c 	if (e->cur == e->end) {
e                  82 drivers/gpu/drm/i915/i915_gpu_error.c 			e->err = -ENOMEM;
e                  86 drivers/gpu/drm/i915/i915_gpu_error.c 		if (e->cur) {
e                  87 drivers/gpu/drm/i915/i915_gpu_error.c 			e->cur->offset = 0;
e                  88 drivers/gpu/drm/i915/i915_gpu_error.c 			e->cur->length = 0;
e                  89 drivers/gpu/drm/i915/i915_gpu_error.c 			e->cur->page_link =
e                  92 drivers/gpu/drm/i915/i915_gpu_error.c 			e->sgl = sgl;
e                  95 drivers/gpu/drm/i915/i915_gpu_error.c 		e->cur = sgl;
e                  96 drivers/gpu/drm/i915/i915_gpu_error.c 		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
e                  99 drivers/gpu/drm/i915/i915_gpu_error.c 	e->size = ALIGN(len + 1, SZ_64K);
e                 100 drivers/gpu/drm/i915/i915_gpu_error.c 	e->buf = kmalloc(e->size, ALLOW_FAIL);
e                 101 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!e->buf) {
e                 102 drivers/gpu/drm/i915/i915_gpu_error.c 		e->size = PAGE_ALIGN(len + 1);
e                 103 drivers/gpu/drm/i915/i915_gpu_error.c 		e->buf = kmalloc(e->size, GFP_KERNEL);
e                 105 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!e->buf) {
e                 106 drivers/gpu/drm/i915/i915_gpu_error.c 		e->err = -ENOMEM;
e                 114 drivers/gpu/drm/i915/i915_gpu_error.c static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
e                 120 drivers/gpu/drm/i915/i915_gpu_error.c 	if (e->err)
e                 127 drivers/gpu/drm/i915/i915_gpu_error.c 		e->err = len;
e                 131 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!__i915_error_grow(e, len))
e                 134 drivers/gpu/drm/i915/i915_gpu_error.c 	GEM_BUG_ON(e->bytes >= e->size);
e                 135 drivers/gpu/drm/i915/i915_gpu_error.c 	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
e                 137 drivers/gpu/drm/i915/i915_gpu_error.c 		e->err = len;
e                 140 drivers/gpu/drm/i915/i915_gpu_error.c 	e->bytes += len;
e                 143 drivers/gpu/drm/i915/i915_gpu_error.c static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
e                 147 drivers/gpu/drm/i915/i915_gpu_error.c 	if (e->err || !str)
e                 151 drivers/gpu/drm/i915/i915_gpu_error.c 	if (!__i915_error_grow(e, len))
e                 154 drivers/gpu/drm/i915/i915_gpu_error.c 	GEM_BUG_ON(e->bytes + len > e->size);
e                 155 drivers/gpu/drm/i915/i915_gpu_error.c 	memcpy(e->buf + e->bytes, str, len);
e                 156 drivers/gpu/drm/i915/i915_gpu_error.c 	e->bytes += len;
e                 159 drivers/gpu/drm/i915/i915_gpu_error.c #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
e                 160 drivers/gpu/drm/i915/i915_gpu_error.c #define err_puts(e, s) i915_error_puts(e, s)
e                 168 drivers/gpu/drm/i915/i915_gpu_error.c i915_error_printer(struct drm_i915_error_state_buf *e)
e                 172 drivers/gpu/drm/i915/i915_gpu_error.c 		.arg = e,
e                 550 drivers/gpu/drm/i915/i915_gpu_error.c void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
e                 555 drivers/gpu/drm/i915/i915_gpu_error.c 	i915_error_vprintf(e, f, args);
e                1249 drivers/gpu/drm/i915/i915_gpu_error.c static bool record_context(struct drm_i915_error_context *e,
e                1260 drivers/gpu/drm/i915/i915_gpu_error.c 			strcpy(e->comm, task->comm);
e                1261 drivers/gpu/drm/i915/i915_gpu_error.c 			e->pid = task->pid;
e                1266 drivers/gpu/drm/i915/i915_gpu_error.c 	e->hw_id = ctx->hw_id;
e                1267 drivers/gpu/drm/i915/i915_gpu_error.c 	e->sched_attr = ctx->sched;
e                1268 drivers/gpu/drm/i915/i915_gpu_error.c 	e->guilty = atomic_read(&ctx->guilty_count);
e                1269 drivers/gpu/drm/i915/i915_gpu_error.c 	e->active = atomic_read(&ctx->active_count);
e                 199 drivers/gpu/drm/i915/i915_gpu_error.h void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
e                 878 drivers/gpu/drm/i915/intel_uncore.c #define GEN_FW_RANGE(s, e, d) \
e                 879 drivers/gpu/drm/i915/intel_uncore.c 	{ .start = (s), .end = (e), .domains = (d) }
e                  31 drivers/gpu/drm/imx/dw_hdmi-imx.c static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
e                  33 drivers/gpu/drm/imx/dw_hdmi-imx.c 	return container_of(e, struct imx_hdmi, encoder);
e                  75 drivers/gpu/drm/imx/imx-ldb.c static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
e                  77 drivers/gpu/drm/imx/imx-ldb.c 	return container_of(e, struct imx_ldb_channel, encoder);
e                 126 drivers/gpu/drm/imx/imx-tve.c static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
e                 128 drivers/gpu/drm/imx/imx-tve.c 	return container_of(e, struct imx_tve, encoder);
e                  41 drivers/gpu/drm/imx/parallel-display.c static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
e                  43 drivers/gpu/drm/imx/parallel-display.c 	return container_of(e, struct imx_parallel_display, encoder);
e                  79 drivers/gpu/drm/mediatek/mtk_dpi.c static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
e                  81 drivers/gpu/drm/mediatek/mtk_dpi.c 	return container_of(e, struct mtk_dpi, encoder);
e                 181 drivers/gpu/drm/mediatek/mtk_dsi.c static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
e                 183 drivers/gpu/drm/mediatek/mtk_dsi.c 	return container_of(e, struct mtk_dsi, encoder);
e                  29 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c #define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
e                  30 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
e                  32 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
e                  33 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
e                  13 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
e                  14 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e) && (e)->base.parent ? \
e                  15 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e)->base.parent->base.id : -1, \
e                  16 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
e                  18 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c #define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
e                  19 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e) && (e)->base.parent ? \
e                  20 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e)->base.parent->base.id : -1, \
e                  21 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c 		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
e                  12 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
e                  13 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e) && (e)->parent ? \
e                  14 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e)->parent->base.id : -1, \
e                  15 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e) && (e)->hw_intf ? \
e                  16 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
e                  18 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c #define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
e                  19 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e) && (e)->parent ? \
e                  20 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e)->parent->base.id : -1, \
e                  21 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e) && (e)->hw_intf ? \
e                  22 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
e                  40 drivers/gpu/drm/nouveau/nouveau_usif.c 	} e;
e                  96 drivers/gpu/drm/nouveau/nouveau_usif.c 	memcpy(&ntfy->p->e.data[0], header, length);
e                  97 drivers/gpu/drm/nouveau/nouveau_usif.c 	memcpy(&ntfy->p->e.data[length], data, size);
e                 100 drivers/gpu/drm/nouveau/nouveau_usif.c 		struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
e                 111 drivers/gpu/drm/nouveau/nouveau_usif.c 	if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
e                 113 drivers/gpu/drm/nouveau/nouveau_usif.c 		filp->event_space -= ntfy->p->e.base.length;
e                 212 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->base.event = &ntfy->p->e.base;
e                 214 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
e                 215 drivers/gpu/drm/nouveau/nouveau_usif.c 	ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
e                 117 drivers/gpu/drm/nouveau/nvkm/core/mm.c 	u32 s, e;
e                 126 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		e = this->offset + this->length;
e                 135 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			e = rounddown(e, mm->block_size);
e                 138 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		e &= ~mask;
e                 139 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		if (s > e || e - s < size_min)
e                 146 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		this = region_head(mm, this, min(size_max, e - s));
e                 195 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		u32 e = this->offset + this->length;
e                 209 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			e = rounddown(e, mm->block_size);
e                 210 drivers/gpu/drm/nouveau/nvkm/core/mm.c 			c = next->offset - e;
e                 214 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		a = e - s;
e                 215 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		if (s > e || a < size_min)
e                 219 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		s  = (e - a) & ~mask;
e                 220 drivers/gpu/drm/nouveau/nvkm/core/mm.c 		c += (e - s) - a;
e                  19 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h #define mmio_refn(a,b,c,d,e) gf100_grctx_mmio_item((a), (b), (c), (d), (e))
e                  77 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c 		     u8 idx, struct nvbios_vpstate_entry *e)
e                  81 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c 	if (!e || !h || idx > h->ecount)
e                  85 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c 	e->pstate    = nvbios_rd08(b, offset);
e                  86 drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c 	e->clock_mhz = nvbios_rd16(b, offset + 0x5);
e                   7 drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h #define clk_exec(s,e)       hwsq_exec(&(s)->base, (e))
e                 165 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h #define ram_exec(s,e)        ramfuc_exec(&(s)->base, (e))
e                   7 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h #define ram_exec(s,e)       hwsq_exec(&(s)->base, (e))
e                1099 drivers/gpu/drm/omapdrm/dss/dsi.c 	u32 e;
e                1102 drivers/gpu/drm/omapdrm/dss/dsi.c 	e = dsi->errors;
e                1105 drivers/gpu/drm/omapdrm/dss/dsi.c 	return e;
e                2003 drivers/gpu/drm/radeon/cikd.h #define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
e                2708 drivers/gpu/drm/radeon/radeon.h #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
e                2728 drivers/gpu/drm/radeon/radeon.h #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
e                2729 drivers/gpu/drm/radeon/radeon.h #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
e                2730 drivers/gpu/drm/radeon/radeon.h #define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
e                2731 drivers/gpu/drm/radeon/radeon.h #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
e                2741 drivers/gpu/drm/radeon/radeon.h #define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
e                2743 drivers/gpu/drm/radeon/radeon.h #define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
e                2746 drivers/gpu/drm/radeon/radeon.h #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
e                2787 drivers/gpu/drm/radeon/radeon.h #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
e                  22 drivers/gpu/drm/rcar-du/rcar_du_encoder.h #define to_rcar_encoder(e) \
e                  23 drivers/gpu/drm/rcar-du/rcar_du_encoder.h 	container_of(e, struct rcar_du_encoder, base)
e                  25 drivers/gpu/drm/rcar-du/rcar_du_encoder.h #define rcar_encoder_to_drm_encoder(e)	(&(e)->base)
e                 268 drivers/gpu/drm/rcar-du/rcar_lvds.c 			unsigned int e;
e                 280 drivers/gpu/drm/rcar-du/rcar_lvds.c 			for (e = e_min; e < 3; ++e) {
e                 290 drivers/gpu/drm/rcar-du/rcar_lvds.c 				fout = fvco / (1 << e) / div7;
e                 298 drivers/gpu/drm/rcar-du/rcar_lvds.c 					pll->pll_e = e;
e                 129 drivers/gpu/drm/savage/savage_bci.c savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
e                 137 drivers/gpu/drm/savage/savage_bci.c 		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
e                 145 drivers/gpu/drm/savage/savage_bci.c 	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
e                 152 drivers/gpu/drm/savage/savage_bci.c savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
e                 159 drivers/gpu/drm/savage/savage_bci.c 		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
e                 167 drivers/gpu/drm/savage/savage_bci.c 	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
e                 194 drivers/gpu/drm/savage/savage_drv.h 	int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e);
e                 572 drivers/gpu/drm/savage/savage_drv.h #define SET_AGE( age, e, w ) do {	\
e                 573 drivers/gpu/drm/savage/savage_drv.h 	(age)->event = e;		\
e                 577 drivers/gpu/drm/savage/savage_drv.h #define TEST_AGE( age, e, w )				\
e                 578 drivers/gpu/drm/savage/savage_drv.h 	( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
e                1194 drivers/gpu/drm/selftests/test-drm_mm.c 	struct evict_node *e, *en;
e                1198 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[order ? order[i] : i];
e                1199 drivers/gpu/drm/selftests/test-drm_mm.c 		list_add(&e->link, evict_list);
e                1200 drivers/gpu/drm/selftests/test-drm_mm.c 		if (drm_mm_scan_add_block(scan, &e->node))
e                1203 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry_safe(e, en, evict_list, link) {
e                1204 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!drm_mm_scan_remove_block(scan, &e->node))
e                1205 drivers/gpu/drm/selftests/test-drm_mm.c 			list_del(&e->link);
e                1213 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, evict_list, link)
e                1214 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_remove_node(&e->node);
e                1220 drivers/gpu/drm/selftests/test-drm_mm.c 			e = container_of(node, typeof(*e), node);
e                1221 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_mm_remove_node(&e->node);
e                1222 drivers/gpu/drm/selftests/test-drm_mm.c 			list_add(&e->link, evict_list);
e                1240 drivers/gpu/drm/selftests/test-drm_mm.c 	struct evict_node *e;
e                1246 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[n];
e                1247 drivers/gpu/drm/selftests/test-drm_mm.c 		list_add(&e->link, &evict_list);
e                1248 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_scan_add_block(&scan, &e->node);
e                1250 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link)
e                1251 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_scan_remove_block(&scan, &e->node);
e                1254 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[n];
e                1256 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!drm_mm_node_allocated(&e->node)) {
e                1261 drivers/gpu/drm/selftests/test-drm_mm.c 		e->link.next = NULL;
e                1265 drivers/gpu/drm/selftests/test-drm_mm.c 		e = container_of(node, typeof(*e), node);
e                1266 drivers/gpu/drm/selftests/test-drm_mm.c 		e->link.next = &e->link;
e                1270 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[n];
e                1272 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!e->link.next) {
e                1287 drivers/gpu/drm/selftests/test-drm_mm.c 	struct evict_node *e;
e                1293 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[n];
e                1294 drivers/gpu/drm/selftests/test-drm_mm.c 		list_add(&e->link, &evict_list);
e                1295 drivers/gpu/drm/selftests/test-drm_mm.c 		if (drm_mm_scan_add_block(&scan, &e->node))
e                1300 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link) {
e                1301 drivers/gpu/drm/selftests/test-drm_mm.c 		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
e                1304 drivers/gpu/drm/selftests/test-drm_mm.c 				       e->node.start);
e                1312 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link)
e                1313 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_remove_node(&e->node);
e                1318 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link) {
e                1319 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
e                1322 drivers/gpu/drm/selftests/test-drm_mm.c 			       e->node.start);
e                1341 drivers/gpu/drm/selftests/test-drm_mm.c 	struct evict_node *e;
e                1384 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link) {
e                1385 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
e                1388 drivers/gpu/drm/selftests/test-drm_mm.c 			       e->node.start);
e                2095 drivers/gpu/drm/selftests/test-drm_mm.c 	struct evict_node *e;
e                2139 drivers/gpu/drm/selftests/test-drm_mm.c 	list_for_each_entry(e, &evict_list, link) {
e                2140 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(mm, &e->node);
e                2143 drivers/gpu/drm/selftests/test-drm_mm.c 			       e->node.start);
e                 497 drivers/gpu/drm/shmobile/shmob_drm_crtc.c #define to_shmob_encoder(e) \
e                 498 drivers/gpu/drm/shmobile/shmob_drm_crtc.c 	container_of(e, struct shmob_drm_encoder, encoder)
e                 132 drivers/gpu/drm/tegra/drm.h static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
e                 134 drivers/gpu/drm/tegra/drm.h 	return container_of(e, struct tegra_output, encoder);
e                  99 drivers/hid/hid-ntrig.c 	__u8 e =   raw[2] & 0x07;
e                 106 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%u.%u.%u.%u.%u", a, b, c, d, e);
e                 189 drivers/hwmon/pmbus/ltc2978.c 	s16 e = ((s16)data) >> 11;
e                 196 drivers/hwmon/pmbus/ltc2978.c 	e += 6;
e                 197 drivers/hwmon/pmbus/ltc2978.c 	return (e < 0 ? m >> -e : m << e);
e                 467 drivers/ide/setup-pci.c 		const struct ide_pci_enablebit *e = &d->enablebits[port];
e                 469 drivers/ide/setup-pci.c 		if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
e                 470 drivers/ide/setup-pci.c 		    (tmp & e->mask) != e->val)) {
e                 426 drivers/iio/industrialio-core.c 	const struct iio_enum *e = (const struct iio_enum *)priv;
e                 430 drivers/iio/industrialio-core.c 	if (!e->num_items)
e                 433 drivers/iio/industrialio-core.c 	for (i = 0; i < e->num_items; ++i)
e                 434 drivers/iio/industrialio-core.c 		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", e->items[i]);
e                 446 drivers/iio/industrialio-core.c 	const struct iio_enum *e = (const struct iio_enum *)priv;
e                 449 drivers/iio/industrialio-core.c 	if (!e->get)
e                 452 drivers/iio/industrialio-core.c 	i = e->get(indio_dev, chan);
e                 455 drivers/iio/industrialio-core.c 	else if (i >= e->num_items)
e                 458 drivers/iio/industrialio-core.c 	return snprintf(buf, PAGE_SIZE, "%s\n", e->items[i]);
e                 466 drivers/iio/industrialio-core.c 	const struct iio_enum *e = (const struct iio_enum *)priv;
e                 469 drivers/iio/industrialio-core.c 	if (!e->set)
e                 472 drivers/iio/industrialio-core.c 	ret = __sysfs_match_string(e->items, e->num_items, buf);
e                 476 drivers/iio/industrialio-core.c 	ret = e->set(indio_dev, chan, ret);
e                 160 drivers/infiniband/core/iwcm.c 	struct list_head *e, *tmp;
e                 162 drivers/infiniband/core/iwcm.c 	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
e                 163 drivers/infiniband/core/iwcm.c 		list_del(e);
e                 164 drivers/infiniband/core/iwcm.c 		kfree(list_entry(e, struct iwcm_work, free_list));
e                  62 drivers/infiniband/core/restrack.c 	struct rdma_restrack_entry *e;
e                  78 drivers/infiniband/core/restrack.c 			xa_for_each(xa, index, e) {
e                  79 drivers/infiniband/core/restrack.c 				if (rdma_is_kernel_res(e)) {
e                  80 drivers/infiniband/core/restrack.c 					owner = e->kern_name;
e                  87 drivers/infiniband/core/restrack.c 					get_task_comm(buf, e->task);
e                  92 drivers/infiniband/core/restrack.c 				       rdma_is_kernel_res(e) ? "Kernel" :
e                  94 drivers/infiniband/core/restrack.c 				       type2str(e->type), owner);
e                 114 drivers/infiniband/core/restrack.c 	struct rdma_restrack_entry *e;
e                 119 drivers/infiniband/core/restrack.c 	xas_for_each(&xas, e, U32_MAX) {
e                 120 drivers/infiniband/core/restrack.c 		if (!rdma_is_visible_in_pid_ns(e))
e                2331 drivers/infiniband/core/sa_query.c 	int s, e, i;
e                2335 drivers/infiniband/core/sa_query.c 	e = rdma_end_port(device);
e                2337 drivers/infiniband/core/sa_query.c 	sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
e                2342 drivers/infiniband/core/sa_query.c 	sa_dev->end_port   = e;
e                2344 drivers/infiniband/core/sa_query.c 	for (i = 0; i <= e - s; ++i) {
e                2384 drivers/infiniband/core/sa_query.c 	for (i = 0; i <= e - s; ++i) {
e                1358 drivers/infiniband/core/user_mad.c 	int s, e, i;
e                1362 drivers/infiniband/core/user_mad.c 	e = rdma_end_port(device);
e                1364 drivers/infiniband/core/user_mad.c 	umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
e                1369 drivers/infiniband/core/user_mad.c 	for (i = s; i <= e; ++i) {
e                 248 drivers/infiniband/core/uverbs_ioctl.c 	struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
e                 266 drivers/infiniband/core/uverbs_ioctl.c 		e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
e                 290 drivers/infiniband/core/uverbs_ioctl.c 		e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
e                 291 drivers/infiniband/core/uverbs_ioctl.c 		e->ptr_attr.len = uattr->len;
e                 293 drivers/infiniband/core/uverbs_ioctl.c 		if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
e                 300 drivers/infiniband/core/uverbs_ioctl.c 			e->ptr_attr.ptr = p;
e                 306 drivers/infiniband/core/uverbs_ioctl.c 			e->ptr_attr.data = uattr->data;
e                 318 drivers/infiniband/core/uverbs_ioctl.c 		o_attr = &e->obj_attr;
e                 347 drivers/infiniband/core/uverbs_ioctl.c 						 &e->objs_arr_attr, uattr,
e                4066 drivers/infiniband/hw/cxgb4/cm.c 	struct l2t_entry *e;
e                4152 drivers/infiniband/hw/cxgb4/cm.c 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
e                4158 drivers/infiniband/hw/cxgb4/cm.c 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
e                4163 drivers/infiniband/hw/cxgb4/cm.c 	if (!e) {
e                4176 drivers/infiniband/hw/cxgb4/cm.c 						    e));
e                4187 drivers/infiniband/hw/cxgb4/cm.c 	cxgb4_l2t_release(e);
e                1143 drivers/infiniband/hw/hfi1/init.c 	u32 e;
e                1164 drivers/infiniband/hw/hfi1/init.c 	for (e = 0; e < rcd->egrbufs.alloced; e++) {
e                1165 drivers/infiniband/hw/hfi1/init.c 		if (rcd->egrbufs.buffers[e].dma)
e                1167 drivers/infiniband/hw/hfi1/init.c 					  rcd->egrbufs.buffers[e].len,
e                1168 drivers/infiniband/hw/hfi1/init.c 					  rcd->egrbufs.buffers[e].addr,
e                1169 drivers/infiniband/hw/hfi1/init.c 					  rcd->egrbufs.buffers[e].dma);
e                 135 drivers/infiniband/hw/hfi1/opfn.c void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                 153 drivers/infiniband/hw/hfi1/opfn.c 		e->atomic_data = capcode;
e                 170 drivers/infiniband/hw/hfi1/opfn.c 	e->atomic_data = (data & ~0xf) | capcode;
e                  78 drivers/infiniband/hw/hfi1/opfn.h void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                1804 drivers/infiniband/hw/hfi1/pio.c 	struct pio_map_elem *e;
e                1823 drivers/infiniband/hw/hfi1/pio.c 	e = m->map[vl & m->mask];
e                1824 drivers/infiniband/hw/hfi1/pio.c 	rval = e->ksc[selector & e->mask];
e                 657 drivers/infiniband/hw/hfi1/qp.c 	struct rvt_ack_entry *e = NULL;
e                 665 drivers/infiniband/hw/hfi1/qp.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 711 drivers/infiniband/hw/hfi1/qp.c 		   e ? e->opcode : 0,
e                 712 drivers/infiniband/hw/hfi1/qp.c 		   e ? e->psn : 0,
e                 713 drivers/infiniband/hw/hfi1/qp.c 		   e ? e->lpsn : 0,
e                  62 drivers/infiniband/hw/hfi1/rc.c 	struct rvt_ack_entry *e = NULL;
e                  74 drivers/infiniband/hw/hfi1/rc.c 			e = NULL;
e                  77 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[p];
e                  78 drivers/infiniband/hw/hfi1/rc.c 		if (!e->opcode) {
e                  79 drivers/infiniband/hw/hfi1/rc.c 			e = NULL;
e                  82 drivers/infiniband/hw/hfi1/rc.c 		if (cmp_psn(psn, e->psn) >= 0) {
e                  84 drivers/infiniband/hw/hfi1/rc.c 			    cmp_psn(psn, e->lpsn) <= 0)
e                  95 drivers/infiniband/hw/hfi1/rc.c 	return e;
e                 113 drivers/infiniband/hw/hfi1/rc.c 	struct rvt_ack_entry *e;
e                 142 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 143 drivers/infiniband/hw/hfi1/rc.c 		release_rdma_sge_mr(e);
e                 157 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 158 drivers/infiniband/hw/hfi1/rc.c 		if (e->opcode != TID_OP(WRITE_REQ) &&
e                 162 drivers/infiniband/hw/hfi1/rc.c 		trace_hfi1_rsp_make_rc_ack(qp, e->psn);
e                 173 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 176 drivers/infiniband/hw/hfi1/rc.c 		    hfi1_tid_rdma_ack_interlock(qp, e)) {
e                 180 drivers/infiniband/hw/hfi1/rc.c 		if (e->opcode == OP(RDMA_READ_REQUEST)) {
e                 187 drivers/infiniband/hw/hfi1/rc.c 			len = e->rdma_sge.sge_length;
e                 188 drivers/infiniband/hw/hfi1/rc.c 			if (len && !e->rdma_sge.mr) {
e                 197 drivers/infiniband/hw/hfi1/rc.c 			ps->s_txreq->mr = e->rdma_sge.mr;
e                 200 drivers/infiniband/hw/hfi1/rc.c 			qp->s_ack_rdma_sge.sge = e->rdma_sge;
e                 208 drivers/infiniband/hw/hfi1/rc.c 				e->sent = 1;
e                 212 drivers/infiniband/hw/hfi1/rc.c 			qp->s_ack_rdma_psn = e->psn;
e                 214 drivers/infiniband/hw/hfi1/rc.c 		} else if (e->opcode == TID_OP(WRITE_REQ)) {
e                 222 drivers/infiniband/hw/hfi1/rc.c 			req = ack_to_tid_req(e);
e                 227 drivers/infiniband/hw/hfi1/rc.c 			qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
e                 229 drivers/infiniband/hw/hfi1/rc.c 		} else if (e->opcode == TID_OP(READ_REQ)) {
e                 236 drivers/infiniband/hw/hfi1/rc.c 			len = e->rdma_sge.sge_length;
e                 237 drivers/infiniband/hw/hfi1/rc.c 			if (len && !e->rdma_sge.mr) {
e                 246 drivers/infiniband/hw/hfi1/rc.c 			ps->s_txreq->mr = e->rdma_sge.mr;
e                 249 drivers/infiniband/hw/hfi1/rc.c 			qp->s_ack_rdma_sge.sge = e->rdma_sge;
e                 259 drivers/infiniband/hw/hfi1/rc.c 			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
e                 261 drivers/infiniband/hw/hfi1/rc.c 			bth2 = mask_psn(e->psn);
e                 262 drivers/infiniband/hw/hfi1/rc.c 			e->sent = 1;
e                 284 drivers/infiniband/hw/hfi1/rc.c 			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 285 drivers/infiniband/hw/hfi1/rc.c 			e->sent = 1;
e                 312 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 313 drivers/infiniband/hw/hfi1/rc.c 		req = ack_to_tid_req(e);
e                 328 drivers/infiniband/hw/hfi1/rc.c 		hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
e                 337 drivers/infiniband/hw/hfi1/rc.c 		trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
e                 338 drivers/infiniband/hw/hfi1/rc.c 						     e->lpsn, req);
e                 342 drivers/infiniband/hw/hfi1/rc.c 		e->sent = 1;
e                 349 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 351 drivers/infiniband/hw/hfi1/rc.c 		delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
e                 358 drivers/infiniband/hw/hfi1/rc.c 			e->sent = 1;
e                2557 drivers/infiniband/hw/hfi1/rc.c 	struct rvt_ack_entry *e;
e                2601 drivers/infiniband/hw/hfi1/rc.c 	e = NULL;
e                2607 drivers/infiniband/hw/hfi1/rc.c 	e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
e                2619 drivers/infiniband/hw/hfi1/rc.c 		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
e                2630 drivers/infiniband/hw/hfi1/rc.c 		offset = delta_psn(psn, e->psn) * qp->pmtu;
e                2632 drivers/infiniband/hw/hfi1/rc.c 		if (unlikely(offset + len != e->rdma_sge.sge_length))
e                2634 drivers/infiniband/hw/hfi1/rc.c 		release_rdma_sge_mr(e);
e                2640 drivers/infiniband/hw/hfi1/rc.c 			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
e                2645 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.vaddr = NULL;
e                2646 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.length = 0;
e                2647 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.sge_length = 0;
e                2649 drivers/infiniband/hw/hfi1/rc.c 		e->psn = psn;
e                2665 drivers/infiniband/hw/hfi1/rc.c 		if (!e || e->opcode != (u8)opcode || old_req)
e                3058 drivers/infiniband/hw/hfi1/rc.c 		struct rvt_ack_entry *e;
e                3074 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                3075 drivers/infiniband/hw/hfi1/rc.c 		release_rdma_sge_mr(e);
e                3084 drivers/infiniband/hw/hfi1/rc.c 			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
e                3094 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.mr = NULL;
e                3095 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.vaddr = NULL;
e                3096 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.length = 0;
e                3097 drivers/infiniband/hw/hfi1/rc.c 			e->rdma_sge.sge_length = 0;
e                3099 drivers/infiniband/hw/hfi1/rc.c 		e->opcode = opcode;
e                3100 drivers/infiniband/hw/hfi1/rc.c 		e->sent = 0;
e                3101 drivers/infiniband/hw/hfi1/rc.c 		e->psn = psn;
e                3102 drivers/infiniband/hw/hfi1/rc.c 		e->lpsn = qp->r_psn;
e                3131 drivers/infiniband/hw/hfi1/rc.c 		struct rvt_ack_entry *e;
e                3149 drivers/infiniband/hw/hfi1/rc.c 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                3150 drivers/infiniband/hw/hfi1/rc.c 		release_rdma_sge_mr(e);
e                3153 drivers/infiniband/hw/hfi1/rc.c 			opfn_conn_response(qp, e, ateth);
e                3167 drivers/infiniband/hw/hfi1/rc.c 		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
e                3175 drivers/infiniband/hw/hfi1/rc.c 		e->opcode = opcode;
e                3176 drivers/infiniband/hw/hfi1/rc.c 		e->sent = 0;
e                3177 drivers/infiniband/hw/hfi1/rc.c 		e->psn = psn;
e                3178 drivers/infiniband/hw/hfi1/rc.c 		e->lpsn = psn;
e                  44 drivers/infiniband/hw/hfi1/rc.h static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
e                  46 drivers/infiniband/hw/hfi1/rc.h 	if (e->rdma_sge.mr) {
e                  47 drivers/infiniband/hw/hfi1/rc.h 		rvt_put_mr(e->rdma_sge.mr);
e                  48 drivers/infiniband/hw/hfi1/rc.h 		e->rdma_sge.mr = NULL;
e                 786 drivers/infiniband/hw/hfi1/sdma.c 	struct sdma_map_elem *e;
e                 804 drivers/infiniband/hw/hfi1/sdma.c 	e = m->map[vl & m->mask];
e                 805 drivers/infiniband/hw/hfi1/sdma.c 	rval = e->sde[selector & e->mask];
e                 385 drivers/infiniband/hw/hfi1/tid_rdma.c 			priv->tid_req.e.swqe = wqe;
e                 397 drivers/infiniband/hw/hfi1/tid_rdma.c 			priv->tid_req.e.ack = &qp->s_ack_queue[i];
e                1883 drivers/infiniband/hw/hfi1/tid_rdma.c 				     struct rvt_ack_entry *e,
e                1893 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                1957 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->opcode = (bth0 >> 24) & 0xff;
e                1958 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->psn = psn;
e                1959 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->lpsn = psn + flow->npkts - 1;
e                1960 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->sent = 0;
e                1971 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->r_flow_psn = e->psn;
e                1973 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
e                1986 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                2008 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
e                2009 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (!e || (e->opcode != TID_OP(READ_REQ) &&
e                2010 drivers/infiniband/hw/hfi1/tid_rdma.c 		   e->opcode != TID_OP(WRITE_REQ)))
e                2013 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                2015 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
e                2016 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (e->opcode == TID_OP(READ_REQ)) {
e                2030 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (psn != e->psn || len != req->total_len)
e                2033 drivers/infiniband/hw/hfi1/tid_rdma.c 		release_rdma_sge_mr(e);
e                2039 drivers/infiniband/hw/hfi1/tid_rdma.c 		ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
e                2055 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
e                2091 drivers/infiniband/hw/hfi1/tid_rdma.c 				e = &qp->s_ack_queue[i];
e                2092 drivers/infiniband/hw/hfi1/tid_rdma.c 				req = ack_to_tid_req(e);
e                2093 drivers/infiniband/hw/hfi1/tid_rdma.c 				if (e->opcode == TID_OP(WRITE_REQ) &&
e                2139 drivers/infiniband/hw/hfi1/tid_rdma.c 				req->cur_seg = delta_psn(psn, e->psn);
e                2153 drivers/infiniband/hw/hfi1/tid_rdma.c 			e = &qp->s_ack_queue[i];
e                2154 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = ack_to_tid_req(e);
e                2155 drivers/infiniband/hw/hfi1/tid_rdma.c 			trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
e                2156 drivers/infiniband/hw/hfi1/tid_rdma.c 						   e->lpsn, req);
e                2157 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (e->opcode != TID_OP(WRITE_REQ) ||
e                2194 drivers/infiniband/hw/hfi1/tid_rdma.c 		qp->r_psn = e->lpsn + 1;
e                2198 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_state = e->opcode;
e                2227 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                2277 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                2278 drivers/infiniband/hw/hfi1/tid_rdma.c 	release_rdma_sge_mr(e);
e                2283 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
e                2288 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
e                2292 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_state = e->opcode;
e                2300 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_psn += e->lpsn - e->psn + 1;
e                2337 drivers/infiniband/hw/hfi1/tid_rdma.c u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                2341 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ack_priv *epriv = e->priv;
e                2494 drivers/infiniband/hw/hfi1/tid_rdma.c 			len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
e                2521 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
e                2522 drivers/infiniband/hw/hfi1/tid_rdma.c 					 req->e.swqe->psn, req->e.swqe->lpsn,
e                2856 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                2931 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = &qp->s_ack_queue[qpriv->r_tid_tail];
e                2932 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (e->opcode != TID_OP(WRITE_REQ))
e                2934 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                2941 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
e                2942 drivers/infiniband/hw/hfi1/tid_rdma.c 					       e->lpsn, req);
e                3186 drivers/infiniband/hw/hfi1/tid_rdma.c 		struct rvt_ack_entry *e = &qp->s_ack_queue[i];
e                3191 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (e->opcode != TID_OP(WRITE_REQ))
e                3194 drivers/infiniband/hw/hfi1/tid_rdma.c 			struct hfi1_ack_priv *priv = e->priv;
e                3457 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                3493 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[qpriv->r_tid_alloc];
e                3494 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (e->opcode != TID_OP(WRITE_REQ))
e                3496 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
e                3497 drivers/infiniband/hw/hfi1/tid_rdma.c 		trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
e                3498 drivers/infiniband/hw/hfi1/tid_rdma.c 						   e->lpsn, req);
e                3589 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_psn = e->psn + req->alloc_seg;
e                3650 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                3706 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                3707 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                3714 drivers/infiniband/hw/hfi1/tid_rdma.c 		qp->r_psn = e->lpsn + 1;
e                3719 drivers/infiniband/hw/hfi1/tid_rdma.c 	release_rdma_sge_mr(e);
e                3728 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (e->opcode == TID_OP(WRITE_REQ) &&
e                3733 drivers/infiniband/hw/hfi1/tid_rdma.c 	if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
e                3739 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->opcode = (bth0 >> 24) & 0xff;
e                3740 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->psn = psn;
e                3741 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->lpsn = qp->r_psn;
e                3742 drivers/infiniband/hw/hfi1/tid_rdma.c 	e->sent = 0;
e                3754 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->r_flow_psn = e->psn;
e                3755 drivers/infiniband/hw/hfi1/tid_rdma.c 	req->ss.sge = e->rdma_sge;
e                3762 drivers/infiniband/hw/hfi1/tid_rdma.c 	qp->r_state = e->opcode;
e                3772 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
e                3780 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[qpriv->r_tid_tail];
e                3781 drivers/infiniband/hw/hfi1/tid_rdma.c 		ptr = ack_to_tid_req(e);
e                3783 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (e->opcode != TID_OP(WRITE_REQ) ||
e                3822 drivers/infiniband/hw/hfi1/tid_rdma.c u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                3827 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_ack_priv *epriv = e->priv;
e                3835 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
e                4262 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                4280 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = &qp->s_ack_queue[priv->r_tid_tail];
e                4281 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                4320 drivers/infiniband/hw/hfi1/tid_rdma.c 			ss.sge = e->rdma_sge;
e                4348 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
e                4354 drivers/infiniband/hw/hfi1/tid_rdma.c 		release_rdma_sge_mr(e);
e                4360 drivers/infiniband/hw/hfi1/tid_rdma.c 			e = &qp->s_ack_queue[next];
e                4361 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (e->opcode == TID_OP(WRITE_REQ))
e                4413 drivers/infiniband/hw/hfi1/tid_rdma.c u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                4419 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = ack_to_tid_req(e);
e                4863 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                4920 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[idx];
e                4921 drivers/infiniband/hw/hfi1/tid_rdma.c 		if (e->opcode == TID_OP(WRITE_REQ)) {
e                4922 drivers/infiniband/hw/hfi1/tid_rdma.c 			req = ack_to_tid_req(e);
e                4923 drivers/infiniband/hw/hfi1/tid_rdma.c 			trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
e                4924 drivers/infiniband/hw/hfi1/tid_rdma.c 						      e->lpsn, req);
e                5003 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                5004 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct tid_rdma_request *req = ack_to_tid_req(e);
e                5018 drivers/infiniband/hw/hfi1/tid_rdma.c 	    (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
e                5177 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_ack_entry *e;
e                5195 drivers/infiniband/hw/hfi1/tid_rdma.c 	e = &qp->s_ack_queue[qpriv->r_tid_ack];
e                5196 drivers/infiniband/hw/hfi1/tid_rdma.c 	req = ack_to_tid_req(e);
e                5214 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
e                5215 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
e                5218 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_rsp_make_tid_ack(qp, e->psn);
e                5219 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
e                5264 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
e                5265 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
e                5285 drivers/infiniband/hw/hfi1/tid_rdma.c 		e = &qp->s_ack_queue[qpriv->r_tid_ack];
e                5286 drivers/infiniband/hw/hfi1/tid_rdma.c 		req = ack_to_tid_req(e);
e                5293 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
e                5295 drivers/infiniband/hw/hfi1/tid_rdma.c 	hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
e                5449 drivers/infiniband/hw/hfi1/tid_rdma.c bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
e                5461 drivers/infiniband/hw/hfi1/tid_rdma.c 	if ((e->opcode == TID_OP(READ_REQ) ||
e                5462 drivers/infiniband/hw/hfi1/tid_rdma.c 	     e->opcode == OP(RDMA_READ_REQUEST)) &&
e                 103 drivers/infiniband/hw/hfi1/tid_rdma.h 	} e;
e                 251 drivers/infiniband/hw/hfi1/tid_rdma.h u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                 280 drivers/infiniband/hw/hfi1/tid_rdma.h u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                 295 drivers/infiniband/hw/hfi1/tid_rdma.h u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
e                 317 drivers/infiniband/hw/hfi1/tid_rdma.h bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);
e                 153 drivers/infiniband/hw/hfi1/trace_tx.h 		     u16 e,
e                 155 drivers/infiniband/hw/hfi1/trace_tx.h 		     TP_ARGS(sde, desc0, desc1, e, descp),
e                 160 drivers/infiniband/hw/hfi1/trace_tx.h 		     __field(u16, e)
e                 168 drivers/infiniband/hw/hfi1/trace_tx.h 		     __entry->e = e;
e                 184 drivers/infiniband/hw/hfi1/trace_tx.h 	    __entry->e
e                 381 drivers/infiniband/hw/hfi1/verbs.h static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
e                 383 drivers/infiniband/hw/hfi1/verbs.h 	return &((struct hfi1_ack_priv *)e->priv)->tid_req;
e                  47 drivers/infiniband/hw/mlx5/srq.h 	void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
e                 922 drivers/infiniband/hw/qib/qib_init.c 		unsigned e;
e                 924 drivers/infiniband/hw/qib/qib_init.c 		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
e                 925 drivers/infiniband/hw/qib/qib_init.c 			void *base = rcd->rcvegrbuf[e];
e                 929 drivers/infiniband/hw/qib/qib_init.c 					  base, rcd->rcvegrbuf_phys[e]);
e                1623 drivers/infiniband/hw/qib/qib_init.c 	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
e                1658 drivers/infiniband/hw/qib/qib_init.c 	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
e                1659 drivers/infiniband/hw/qib/qib_init.c 		if (rcd->rcvegrbuf[e])
e                1664 drivers/infiniband/hw/qib/qib_init.c 		rcd->rcvegrbuf[e] =
e                1666 drivers/infiniband/hw/qib/qib_init.c 					   &rcd->rcvegrbuf_phys[e],
e                1669 drivers/infiniband/hw/qib/qib_init.c 		if (!rcd->rcvegrbuf[e])
e                1675 drivers/infiniband/hw/qib/qib_init.c 	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
e                1682 drivers/infiniband/hw/qib/qib_init.c 		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
e                1683 drivers/infiniband/hw/qib/qib_init.c 			dd->f_put_tid(dd, e + egroff +
e                1697 drivers/infiniband/hw/qib/qib_init.c 	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
e                1699 drivers/infiniband/hw/qib/qib_init.c 				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
e                  65 drivers/infiniband/hw/qib/qib_rc.c 	struct rvt_ack_entry *e;
e                  81 drivers/infiniband/hw/qib/qib_rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                  82 drivers/infiniband/hw/qib/qib_rc.c 		if (e->rdma_sge.mr) {
e                  83 drivers/infiniband/hw/qib/qib_rc.c 			rvt_put_mr(e->rdma_sge.mr);
e                  84 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.mr = NULL;
e                 105 drivers/infiniband/hw/qib/qib_rc.c 		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 106 drivers/infiniband/hw/qib/qib_rc.c 		if (e->opcode == OP(RDMA_READ_REQUEST)) {
e                 113 drivers/infiniband/hw/qib/qib_rc.c 			len = e->rdma_sge.sge_length;
e                 114 drivers/infiniband/hw/qib/qib_rc.c 			if (len && !e->rdma_sge.mr) {
e                 119 drivers/infiniband/hw/qib/qib_rc.c 			qp->s_rdma_mr = e->rdma_sge.mr;
e                 122 drivers/infiniband/hw/qib/qib_rc.c 			qp->s_ack_rdma_sge.sge = e->rdma_sge;
e                 130 drivers/infiniband/hw/qib/qib_rc.c 				e->sent = 1;
e                 134 drivers/infiniband/hw/qib/qib_rc.c 			qp->s_ack_rdma_psn = e->psn;
e                 142 drivers/infiniband/hw/qib/qib_rc.c 			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
e                 144 drivers/infiniband/hw/qib/qib_rc.c 			bth2 = e->psn & QIB_PSN_MASK;
e                 145 drivers/infiniband/hw/qib/qib_rc.c 			e->sent = 1;
e                 165 drivers/infiniband/hw/qib/qib_rc.c 			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
e                 166 drivers/infiniband/hw/qib/qib_rc.c 			e->sent = 1;
e                1499 drivers/infiniband/hw/qib/qib_rc.c 	struct rvt_ack_entry *e;
e                1545 drivers/infiniband/hw/qib/qib_rc.c 	e = NULL;
e                1559 drivers/infiniband/hw/qib/qib_rc.c 			e = NULL;
e                1562 drivers/infiniband/hw/qib/qib_rc.c 		e = &qp->s_ack_queue[prev];
e                1563 drivers/infiniband/hw/qib/qib_rc.c 		if (!e->opcode) {
e                1564 drivers/infiniband/hw/qib/qib_rc.c 			e = NULL;
e                1567 drivers/infiniband/hw/qib/qib_rc.c 		if (qib_cmp24(psn, e->psn) >= 0) {
e                1569 drivers/infiniband/hw/qib/qib_rc.c 			    qib_cmp24(psn, e->lpsn) <= 0)
e                1584 drivers/infiniband/hw/qib/qib_rc.c 		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
e                1595 drivers/infiniband/hw/qib/qib_rc.c 		offset = ((psn - e->psn) & QIB_PSN_MASK) *
e                1598 drivers/infiniband/hw/qib/qib_rc.c 		if (unlikely(offset + len != e->rdma_sge.sge_length))
e                1600 drivers/infiniband/hw/qib/qib_rc.c 		if (e->rdma_sge.mr) {
e                1601 drivers/infiniband/hw/qib/qib_rc.c 			rvt_put_mr(e->rdma_sge.mr);
e                1602 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.mr = NULL;
e                1609 drivers/infiniband/hw/qib/qib_rc.c 			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
e                1614 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.vaddr = NULL;
e                1615 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.length = 0;
e                1616 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.sge_length = 0;
e                1618 drivers/infiniband/hw/qib/qib_rc.c 		e->psn = psn;
e                1632 drivers/infiniband/hw/qib/qib_rc.c 		if (!e || e->opcode != (u8) opcode || old_req)
e                1937 drivers/infiniband/hw/qib/qib_rc.c 		struct rvt_ack_entry *e;
e                1953 drivers/infiniband/hw/qib/qib_rc.c 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                1954 drivers/infiniband/hw/qib/qib_rc.c 		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
e                1955 drivers/infiniband/hw/qib/qib_rc.c 			rvt_put_mr(e->rdma_sge.mr);
e                1956 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.mr = NULL;
e                1966 drivers/infiniband/hw/qib/qib_rc.c 			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
e                1976 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.mr = NULL;
e                1977 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.vaddr = NULL;
e                1978 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.length = 0;
e                1979 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.sge_length = 0;
e                1981 drivers/infiniband/hw/qib/qib_rc.c 		e->opcode = opcode;
e                1982 drivers/infiniband/hw/qib/qib_rc.c 		e->sent = 0;
e                1983 drivers/infiniband/hw/qib/qib_rc.c 		e->psn = psn;
e                1984 drivers/infiniband/hw/qib/qib_rc.c 		e->lpsn = qp->r_psn;
e                2006 drivers/infiniband/hw/qib/qib_rc.c 		struct rvt_ack_entry *e;
e                2024 drivers/infiniband/hw/qib/qib_rc.c 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
e                2025 drivers/infiniband/hw/qib/qib_rc.c 		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
e                2026 drivers/infiniband/hw/qib/qib_rc.c 			rvt_put_mr(e->rdma_sge.mr);
e                2027 drivers/infiniband/hw/qib/qib_rc.c 			e->rdma_sge.mr = NULL;
e                2042 drivers/infiniband/hw/qib/qib_rc.c 		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
e                2049 drivers/infiniband/hw/qib/qib_rc.c 		e->opcode = opcode;
e                2050 drivers/infiniband/hw/qib/qib_rc.c 		e->sent = 0;
e                2051 drivers/infiniband/hw/qib/qib_rc.c 		e->psn = psn;
e                2052 drivers/infiniband/hw/qib/qib_rc.c 		e->lpsn = psn;
e                 324 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		struct ib_event e;
e                 326 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.device = ibqp->device;
e                 327 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.element.qp = ibqp;
e                 328 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.event = type; /* 1:1 mapping for now. */
e                 329 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		ibqp->event_handler(&e, ibqp->qp_context);
e                 350 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		struct ib_event e;
e                 352 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.device = ibcq->device;
e                 353 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.element.cq = ibcq;
e                 354 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.event = type; /* 1:1 mapping for now. */
e                 355 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		ibcq->event_handler(&e, ibcq->cq_context);
e                 379 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		struct ib_event e;
e                 381 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.device = ibsrq->device;
e                 382 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.element.srq = ibsrq;
e                 383 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		e.event = type; /* 1:1 mapping for now. */
e                 384 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		ibsrq->event_handler(&e, ibsrq->srq_context);
e                 644 drivers/infiniband/sw/rdmavt/qp.c 		struct rvt_ack_entry *e = &qp->s_ack_queue[n];
e                 646 drivers/infiniband/sw/rdmavt/qp.c 		if (e->rdma_sge.mr) {
e                 647 drivers/infiniband/sw/rdmavt/qp.c 			rvt_put_mr(e->rdma_sge.mr);
e                 648 drivers/infiniband/sw/rdmavt/qp.c 			e->rdma_sge.mr = NULL;
e                 708 drivers/infiniband/sw/rdmavt/qp.c 		struct rvt_ack_entry *e = &qp->s_ack_queue[i];
e                 710 drivers/infiniband/sw/rdmavt/qp.c 		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
e                1466 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	int e = skb_queue_empty(&priv->cm.skb_queue);
e                1471 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (e)
e                  65 drivers/infiniband/ulp/isert/ib_isert.c isert_qp_event_callback(struct ib_event *e, void *context)
e                  70 drivers/infiniband/ulp/isert/ib_isert.c 		  ib_event_msg(e->event), e->event, isert_conn);
e                  72 drivers/infiniband/ulp/isert/ib_isert.c 	switch (e->event) {
e                 294 drivers/input/gameport/gameport.c 	struct gameport_event *e, *next;
e                 299 drivers/input/gameport/gameport.c 	list_for_each_entry_safe(e, next, &gameport_event_list, node) {
e                 300 drivers/input/gameport/gameport.c 		if (event->object == e->object) {
e                 306 drivers/input/gameport/gameport.c 			if (event->type != e->type)
e                 309 drivers/input/gameport/gameport.c 			list_del_init(&e->node);
e                 310 drivers/input/gameport/gameport.c 			gameport_free_event(e);
e                  61 drivers/input/misc/yealink.c #define _SEG(t, a, am, b, bm, c, cm, d, dm, e, em, f, fm, g, gm)	\
e                  64 drivers/input/misc/yealink.c 		        _LOC(d, dm), _LOC(e, em), _LOC(g, gm),		\
e                  23 drivers/input/mouse/cypress_ps2.h #define CYTP_CMD_SET_PALM_GEOMETRY(e)       ENCODE_CMD(1, 2, 1, (e))
e                 176 drivers/input/serio/serio.c 	struct serio_event *e, *next;
e                 181 drivers/input/serio/serio.c 	list_for_each_entry_safe(e, next, &serio_event_list, node) {
e                 182 drivers/input/serio/serio.c 		if (object == e->object) {
e                 188 drivers/input/serio/serio.c 			if (type != e->type)
e                 191 drivers/input/serio/serio.c 			list_del_init(&e->node);
e                 192 drivers/input/serio/serio.c 			serio_free_event(e);
e                 171 drivers/input/sparse-keymap.c 	const struct key_entry *e;
e                 176 drivers/input/sparse-keymap.c 	for (e = keymap; e->type != KE_END; e++)
e                1148 drivers/iommu/amd_iommu_init.c 	struct ivhd_entry *e;
e                1179 drivers/iommu/amd_iommu_init.c 		e = (struct ivhd_entry *)p;
e                1180 drivers/iommu/amd_iommu_init.c 		switch (e->type) {
e                1183 drivers/iommu/amd_iommu_init.c 			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
e                1186 drivers/iommu/amd_iommu_init.c 				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
e                1192 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1193 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1194 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1195 drivers/iommu/amd_iommu_init.c 				    e->flags);
e                1197 drivers/iommu/amd_iommu_init.c 			devid = e->devid;
e                1198 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
e                1204 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1205 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1206 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1207 drivers/iommu/amd_iommu_init.c 				    e->flags);
e                1209 drivers/iommu/amd_iommu_init.c 			devid_start = e->devid;
e                1210 drivers/iommu/amd_iommu_init.c 			flags = e->flags;
e                1218 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1219 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1220 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1221 drivers/iommu/amd_iommu_init.c 				    e->flags,
e                1222 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->ext >> 8),
e                1223 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->ext >> 8),
e                1224 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->ext >> 8));
e                1226 drivers/iommu/amd_iommu_init.c 			devid = e->devid;
e                1227 drivers/iommu/amd_iommu_init.c 			devid_to = e->ext >> 8;
e                1228 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
e                1229 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
e                1237 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1238 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1239 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1240 drivers/iommu/amd_iommu_init.c 				    e->flags,
e                1241 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->ext >> 8),
e                1242 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->ext >> 8),
e                1243 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->ext >> 8));
e                1245 drivers/iommu/amd_iommu_init.c 			devid_start = e->devid;
e                1246 drivers/iommu/amd_iommu_init.c 			flags = e->flags;
e                1247 drivers/iommu/amd_iommu_init.c 			devid_to = e->ext >> 8;
e                1255 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1256 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1257 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1258 drivers/iommu/amd_iommu_init.c 				    e->flags, e->ext);
e                1260 drivers/iommu/amd_iommu_init.c 			devid = e->devid;
e                1261 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid, e->flags,
e                1262 drivers/iommu/amd_iommu_init.c 						e->ext);
e                1268 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1269 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1270 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid),
e                1271 drivers/iommu/amd_iommu_init.c 				    e->flags, e->ext);
e                1273 drivers/iommu/amd_iommu_init.c 			devid_start = e->devid;
e                1274 drivers/iommu/amd_iommu_init.c 			flags = e->flags;
e                1275 drivers/iommu/amd_iommu_init.c 			ext_flags = e->ext;
e                1281 drivers/iommu/amd_iommu_init.c 				    PCI_BUS_NUM(e->devid),
e                1282 drivers/iommu/amd_iommu_init.c 				    PCI_SLOT(e->devid),
e                1283 drivers/iommu/amd_iommu_init.c 				    PCI_FUNC(e->devid));
e                1285 drivers/iommu/amd_iommu_init.c 			devid = e->devid;
e                1302 drivers/iommu/amd_iommu_init.c 			handle = e->ext & 0xff;
e                1303 drivers/iommu/amd_iommu_init.c 			devid  = (e->ext >>  8) & 0xffff;
e                1304 drivers/iommu/amd_iommu_init.c 			type   = (e->ext >> 24) & 0xff;
e                1328 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
e                1340 drivers/iommu/amd_iommu_init.c 				       e->type);
e                1344 drivers/iommu/amd_iommu_init.c 			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
e                1353 drivers/iommu/amd_iommu_init.c 			switch (e->uidf) {
e                1356 drivers/iommu/amd_iommu_init.c 				if (e->uidl != 0)
e                1362 drivers/iommu/amd_iommu_init.c 				sprintf(uid, "%d", e->uid);
e                1367 drivers/iommu/amd_iommu_init.c 				memcpy(uid, &e->uid, e->uidl);
e                1368 drivers/iommu/amd_iommu_init.c 				uid[e->uidl] = '\0';
e                1375 drivers/iommu/amd_iommu_init.c 			devid = e->devid;
e                1382 drivers/iommu/amd_iommu_init.c 			flags = e->flags;
e                1393 drivers/iommu/amd_iommu_init.c 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
e                2105 drivers/iommu/amd_iommu_init.c 	struct unity_map_entry *e = NULL;
e                2108 drivers/iommu/amd_iommu_init.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                2109 drivers/iommu/amd_iommu_init.c 	if (e == NULL)
e                2117 drivers/iommu/amd_iommu_init.c 		kfree(e);
e                2121 drivers/iommu/amd_iommu_init.c 		e->devid_start = e->devid_end = m->devid;
e                2125 drivers/iommu/amd_iommu_init.c 		e->devid_start = 0;
e                2126 drivers/iommu/amd_iommu_init.c 		e->devid_end = amd_iommu_last_bdf;
e                2130 drivers/iommu/amd_iommu_init.c 		e->devid_start = m->devid;
e                2131 drivers/iommu/amd_iommu_init.c 		e->devid_end = m->aux;
e                2134 drivers/iommu/amd_iommu_init.c 	e->address_start = PAGE_ALIGN(m->range_start);
e                2135 drivers/iommu/amd_iommu_init.c 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
e                2136 drivers/iommu/amd_iommu_init.c 	e->prot = m->flags >> 1;
e                2140 drivers/iommu/amd_iommu_init.c 		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
e                2141 drivers/iommu/amd_iommu_init.c 		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
e                2142 drivers/iommu/amd_iommu_init.c 		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e                2143 drivers/iommu/amd_iommu_init.c 		    e->address_start, e->address_end, m->flags);
e                2145 drivers/iommu/amd_iommu_init.c 	list_add_tail(&e->list, &amd_iommu_unity_map);
e                 515 drivers/iommu/amd_iommu_v2.c static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
e                 231 drivers/iommu/omap-iommu.c static u32 get_iopte_attr(struct iotlb_entry *e)
e                 235 drivers/iommu/omap-iommu.c 	attr = e->mixed << 5;
e                 236 drivers/iommu/omap-iommu.c 	attr |= e->endian;
e                 237 drivers/iommu/omap-iommu.c 	attr |= e->elsz >> 3;
e                 238 drivers/iommu/omap-iommu.c 	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
e                 239 drivers/iommu/omap-iommu.c 			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
e                 313 drivers/iommu/omap-iommu.c 				      struct iotlb_entry *e)
e                 317 drivers/iommu/omap-iommu.c 	if (!e)
e                 320 drivers/iommu/omap-iommu.c 	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
e                 322 drivers/iommu/omap-iommu.c 			e->da);
e                 330 drivers/iommu/omap-iommu.c 	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
e                 331 drivers/iommu/omap-iommu.c 	cr->ram = e->pa | e->endian | e->elsz | e->mixed;
e                 341 drivers/iommu/omap-iommu.c static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
e                 347 drivers/iommu/omap-iommu.c 	if (!obj || !obj->nr_tlb_entries || !e)
e                 358 drivers/iommu/omap-iommu.c 	if (!e->prsvd) {
e                 378 drivers/iommu/omap-iommu.c 	cr = iotlb_alloc_cr(obj, e);
e                 387 drivers/iommu/omap-iommu.c 	if (e->prsvd)
e                 400 drivers/iommu/omap-iommu.c static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
e                 407 drivers/iommu/omap-iommu.c static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
e                 409 drivers/iommu/omap-iommu.c 	return load_iotlb_entry(obj, e);
e                 633 drivers/iommu/omap-iommu.c iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
e                 639 drivers/iommu/omap-iommu.c 	if (!obj || !e)
e                 642 drivers/iommu/omap-iommu.c 	switch (e->pgsz) {
e                 663 drivers/iommu/omap-iommu.c 	prot = get_iopte_attr(e);
e                 666 drivers/iommu/omap-iommu.c 	err = fn(obj, e->da, e->pa, prot);
e                 678 drivers/iommu/omap-iommu.c omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
e                 682 drivers/iommu/omap-iommu.c 	flush_iotlb_page(obj, e->da);
e                 683 drivers/iommu/omap-iommu.c 	err = iopgtable_store_entry_core(obj, e);
e                 685 drivers/iommu/omap-iommu.c 		prefetch_iotlb_entry(obj, e);
e                1326 drivers/iommu/omap-iommu.c static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e                1328 drivers/iommu/omap-iommu.c 	memset(e, 0, sizeof(*e));
e                1330 drivers/iommu/omap-iommu.c 	e->da		= da;
e                1331 drivers/iommu/omap-iommu.c 	e->pa		= pa;
e                1332 drivers/iommu/omap-iommu.c 	e->valid	= MMU_CAM_V;
e                1333 drivers/iommu/omap-iommu.c 	e->pgsz		= pgsz;
e                1334 drivers/iommu/omap-iommu.c 	e->endian	= MMU_RAM_ENDIAN_LITTLE;
e                1335 drivers/iommu/omap-iommu.c 	e->elsz		= MMU_RAM_ELSZ_8;
e                1336 drivers/iommu/omap-iommu.c 	e->mixed	= 0;
e                1338 drivers/iommu/omap-iommu.c 	return iopgsz_to_bytes(e->pgsz);
e                1348 drivers/iommu/omap-iommu.c 	struct iotlb_entry e;
e                1361 drivers/iommu/omap-iommu.c 	iotlb_init_entry(&e, da, pa, omap_pgsz);
e                1366 drivers/iommu/omap-iommu.c 		ret = omap_iopgtable_store_entry(oiommu, &e);
e                  98 drivers/irqchip/irq-ti-sci-inta.c #define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
e                  90 drivers/isdn/mISDN/dsp_audio.c 	short mu, e, f, y;
e                  94 drivers/isdn/mISDN/dsp_audio.c 	e = (mu & 0x70) / 16;
e                  96 drivers/isdn/mISDN/dsp_audio.c 	y = f * (1 << (e + 3));
e                  97 drivers/isdn/mISDN/dsp_audio.c 	y += etab[e];
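
The dsp_audio.c fragment above is mISDN's table-assisted mu-law decoder: e is the 3-bit exponent, f the mantissa, and etab[] a per-exponent offset. A self-contained sketch of the classic G.711 expansion it is a variant of (after the well-known public-domain g711.c; the 0x84 bias and the inverted byte are standard mu-law conventions, not taken from this driver):

/* Expand one 8-bit mu-law byte to a 16-bit linear sample. */
static short ulaw_to_linear(unsigned char u)
{
	short t;

	u = ~u;				/* mu-law bytes are stored inverted */
	t = ((u & 0x0f) << 3) + 0x84;	/* mantissa plus the 132 bias */
	t <<= (u & 0x70) >> 4;		/* scale by the 3-bit exponent, e above */
	return (u & 0x80) ? (0x84 - t) : (t - 0x84);
}

The lookup in the listing trades this arithmetic for a small precomputed etab[] indexed by the exponent.
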
e                 294 drivers/leds/leds-aat1290.c 	int i, b = 0, e = AAT1290_MM_CURRENT_SCALE_SIZE;
e                 296 drivers/leds/leds-aat1290.c 	while (e - b > 1) {
e                 297 drivers/leds/leds-aat1290.c 		i = b + (e - b) / 2;
e                 299 drivers/leds/leds-aat1290.c 			e = i;
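
leds-aat1290.c narrows (b, e] by bisection until one index remains, locating where a requested current falls in a sorted scale table. A sketch of the same lower-bound loop over a hypothetical scale[] (the real driver walks its mm_current_scale[] of AAT1290_MM_CURRENT_SCALE_SIZE entries):

#include <stdio.h>

/* Hypothetical sorted table standing in for mm_current_scale[]. */
static const int scale[] = { 20, 39, 79, 118, 158, 197, 236, 276 };
#define SCALE_SIZE (sizeof(scale) / sizeof(scale[0]))

/* Returns the largest index whose value is <= val (clamped to 0),
 * using the same b/e bisection as the driver loop above. */
static int scale_index(int val)
{
	int i, b = 0, e = (int)SCALE_SIZE;

	while (e - b > 1) {
		i = b + (e - b) / 2;
		if (val < scale[i])
			e = i;
		else
			b = i;
	}
	return b;
}

int main(void)
{
	printf("%d\n", scale_index(100));	/* prints 2 (79 <= 100 < 118) */
	return 0;
}
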
e                 291 drivers/lightnvm/core.c 				 struct nvm_ioctl_create_extended *e)
e                 293 drivers/lightnvm/core.c 	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
e                 294 drivers/lightnvm/core.c 		e->lun_begin = 0;
e                 295 drivers/lightnvm/core.c 		e->lun_end = dev->geo.all_luns - 1;
e                 299 drivers/lightnvm/core.c 	if (e->op == 0xFFFF) {
e                 300 drivers/lightnvm/core.c 		e->op = NVM_TARGET_DEFAULT_OP;
e                 301 drivers/lightnvm/core.c 	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
e                 306 drivers/lightnvm/core.c 	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
e                 311 drivers/lightnvm/core.c 	struct nvm_ioctl_create_extended e;
e                 327 drivers/lightnvm/core.c 		e.lun_begin = create->conf.s.lun_begin;
e                 328 drivers/lightnvm/core.c 		e.lun_end = create->conf.s.lun_end;
e                 329 drivers/lightnvm/core.c 		e.op = NVM_TARGET_DEFAULT_OP;
e                 332 drivers/lightnvm/core.c 		ret = __nvm_config_extended(dev, &create->conf.e);
e                 336 drivers/lightnvm/core.c 		e = create->conf.e;
e                 360 drivers/lightnvm/core.c 	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
e                 370 drivers/lightnvm/core.c 	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
e                 445 drivers/lightnvm/core.c 	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
e                1346 drivers/lightnvm/core.c 	    create.conf.e.rsv != 0) {
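
In the lightnvm lines above, 0xFFFF acts as a wildcard: a create request with lun_begin/lun_end/op left at 0xFFFF gets device-wide LUNs and the default overprovision value, while explicit values are range-checked. A sketch of that sentinel-defaulting pattern (the struct and limits are hypothetical; only the 0xFFFF convention comes from the listing):

#include <stdint.h>

#define CFG_WILDCARD	0xFFFF
#define DEFAULT_OP	11	/* stand-in for NVM_TARGET_DEFAULT_OP */

/* Hypothetical mirror of nvm_ioctl_create_extended. */
struct cfg {
	uint16_t lun_begin;
	uint16_t lun_end;
	uint16_t op;
};

/* Replace wildcard fields with defaults; reject explicit values that
 * fall outside [min_op, max_op] -- the shape of __nvm_config_extended()
 * above. */
static int apply_defaults(struct cfg *c, uint16_t nr_luns,
			  uint16_t min_op, uint16_t max_op)
{
	if (c->lun_begin == CFG_WILDCARD && c->lun_end == CFG_WILDCARD) {
		c->lun_begin = 0;
		c->lun_end = nr_luns - 1;
	}

	if (c->op == CFG_WILDCARD)
		c->op = DEFAULT_OP;
	else if (c->op < min_op || c->op > max_op)
		return -1;

	return 0;
}
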
e                 328 drivers/lightnvm/pblk-core.c 	int i, e, nbv = 0;
e                 333 drivers/lightnvm/pblk-core.c 		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
e                  27 drivers/md/bcache/util.c 	char *e;						\
e                  28 drivers/md/bcache/util.c 	type i = simple_ ## name(cp, &e, 10);			\
e                  30 drivers/md/bcache/util.c 	switch (tolower(*e)) {					\
e                  54 drivers/md/bcache/util.c 		if (e++ == cp)					\
e                  59 drivers/md/bcache/util.c 		if (*e == '\n')					\
e                  60 drivers/md/bcache/util.c 			e++;					\
e                  63 drivers/md/bcache/util.c 	if (*e)							\
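
The bcache util.c macro above wraps simple_strtoul-style parsing with a k/m/g size-suffix multiplier and tolerates one trailing newline (handy for sysfs writes). A userspace sketch of the same pattern with strtoull, with overflow checks omitted for brevity (the kernel macro also guards those):

#include <ctype.h>
#include <stdint.h>
#include <stdlib.h>

/* Parse "<digits>[k|m|g][\n]"; returns -1 on malformed input. */
static int strtou64_h(const char *cp, uint64_t *res)
{
	char *e;
	uint64_t i = strtoull(cp, &e, 10);

	if (e == cp)			/* no digits at all */
		return -1;

	switch (tolower((unsigned char)*e)) {
	case 'g':
		i <<= 10;		/* fall through */
	case 'm':
		i <<= 10;		/* fall through */
	case 'k':
		i <<= 10;
		e++;
	}
	if (*e == '\n')			/* allow one trailing newline */
		e++;
	if (*e)				/* anything else: reject */
		return -1;
	*res = i;
	return 0;
}

A bare suffix with no digits is rejected up front because strtoull leaves e == cp, the same condition the macro checks with its e++ == cp test.
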
e                  87 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                  89 drivers/md/dm-cache-policy-smq.c 	e = es->begin + block;
e                  90 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e >= es->end);
e                  92 drivers/md/dm-cache-policy-smq.c 	return e;
e                  95 drivers/md/dm-cache-policy-smq.c static unsigned to_index(struct entry_space *es, struct entry *e)
e                  97 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e < es->begin || e >= es->end);
e                  98 drivers/md/dm-cache-policy-smq.c 	return e - es->begin;
e                 132 drivers/md/dm-cache-policy-smq.c static struct entry *l_next(struct entry_space *es, struct entry *e)
e                 134 drivers/md/dm-cache-policy-smq.c 	return to_entry(es, e->next);
e                 137 drivers/md/dm-cache-policy-smq.c static struct entry *l_prev(struct entry_space *es, struct entry *e)
e                 139 drivers/md/dm-cache-policy-smq.c 	return to_entry(es, e->prev);
e                 147 drivers/md/dm-cache-policy-smq.c static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
e                 151 drivers/md/dm-cache-policy-smq.c 	e->next = l->head;
e                 152 drivers/md/dm-cache-policy-smq.c 	e->prev = INDEXER_NULL;
e                 155 drivers/md/dm-cache-policy-smq.c 		head->prev = l->head = to_index(es, e);
e                 157 drivers/md/dm-cache-policy-smq.c 		l->head = l->tail = to_index(es, e);
e                 159 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 163 drivers/md/dm-cache-policy-smq.c static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
e                 167 drivers/md/dm-cache-policy-smq.c 	e->next = INDEXER_NULL;
e                 168 drivers/md/dm-cache-policy-smq.c 	e->prev = l->tail;
e                 171 drivers/md/dm-cache-policy-smq.c 		tail->next = l->tail = to_index(es, e);
e                 173 drivers/md/dm-cache-policy-smq.c 		l->head = l->tail = to_index(es, e);
e                 175 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 180 drivers/md/dm-cache-policy-smq.c 			 struct entry *old, struct entry *e)
e                 185 drivers/md/dm-cache-policy-smq.c 		l_add_head(es, l, e);
e                 188 drivers/md/dm-cache-policy-smq.c 		e->prev = old->prev;
e                 189 drivers/md/dm-cache-policy-smq.c 		e->next = to_index(es, old);
e                 190 drivers/md/dm-cache-policy-smq.c 		prev->next = old->prev = to_index(es, e);
e                 192 drivers/md/dm-cache-policy-smq.c 		if (!e->sentinel)
e                 197 drivers/md/dm-cache-policy-smq.c static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
e                 199 drivers/md/dm-cache-policy-smq.c 	struct entry *prev = l_prev(es, e);
e                 200 drivers/md/dm-cache-policy-smq.c 	struct entry *next = l_next(es, e);
e                 203 drivers/md/dm-cache-policy-smq.c 		prev->next = e->next;
e                 205 drivers/md/dm-cache-policy-smq.c 		l->head = e->next;
e                 208 drivers/md/dm-cache-policy-smq.c 		next->prev = e->prev;
e                 210 drivers/md/dm-cache-policy-smq.c 		l->tail = e->prev;
e                 212 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 218 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 220 drivers/md/dm-cache-policy-smq.c 	for (e = l_head(es, l); e; e = l_next(es, e))
e                 221 drivers/md/dm-cache-policy-smq.c 		if (!e->sentinel) {
e                 222 drivers/md/dm-cache-policy-smq.c 			l_del(es, l, e);
e                 223 drivers/md/dm-cache-policy-smq.c 			return e;
e                 231 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 233 drivers/md/dm-cache-policy-smq.c 	for (e = l_tail(es, l); e; e = l_prev(es, e))
e                 234 drivers/md/dm-cache-policy-smq.c 		if (!e->sentinel) {
e                 235 drivers/md/dm-cache-policy-smq.c 			l_del(es, l, e);
e                 236 drivers/md/dm-cache-policy-smq.c 			return e;
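
dm-cache-policy-smq links its entries by 16-bit array index rather than pointer: to_entry()/to_index() convert between the two, INDEXER_NULL plays the role of NULL, and the l_* helpers above implement a doubly linked list over those indices. A condensed sketch of the representation (sentinel accounting and the remaining list operations left out):

#include <stddef.h>
#include <stdint.h>

#define INDEXER_NULL ((uint16_t)0xFFFF)

struct entry {
	uint16_t next;
	uint16_t prev;
};

struct ilist {
	uint16_t head, tail;	/* initialise both to INDEXER_NULL */
};

/* Map an index back to an entry in the backing array, treating
 * INDEXER_NULL as the null pointer. */
static struct entry *to_entry(struct entry *es, uint16_t i)
{
	return i == INDEXER_NULL ? NULL : es + i;
}

/* Append entry i, as l_add_tail() does above. */
static void l_add_tail(struct entry *es, struct ilist *l, uint16_t i)
{
	struct entry *e = es + i;
	struct entry *tail = to_entry(es, l->tail);

	e->next = INDEXER_NULL;
	e->prev = l->tail;
	if (tail)
		tail->next = l->tail = i;
	else
		l->head = l->tail = i;
}

Indices keep each link to two bytes and stay valid wherever the entry array is mapped, which is why the policy can address many entries with compact per-entry metadata.
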
e                 295 drivers/md/dm-cache-policy-smq.c static void q_push(struct queue *q, struct entry *e)
e                 297 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->pending_work);
e                 299 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 302 drivers/md/dm-cache-policy-smq.c 	l_add_tail(q->es, q->qs + e->level, e);
e                 305 drivers/md/dm-cache-policy-smq.c static void q_push_front(struct queue *q, struct entry *e)
e                 307 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->pending_work);
e                 309 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 312 drivers/md/dm-cache-policy-smq.c 	l_add_head(q->es, q->qs + e->level, e);
e                 315 drivers/md/dm-cache-policy-smq.c static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
e                 317 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->pending_work);
e                 319 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 322 drivers/md/dm-cache-policy-smq.c 	l_add_before(q->es, q->qs + e->level, old, e);
e                 325 drivers/md/dm-cache-policy-smq.c static void q_del(struct queue *q, struct entry *e)
e                 327 drivers/md/dm-cache-policy-smq.c 	l_del(q->es, q->qs + e->level, e);
e                 328 drivers/md/dm-cache-policy-smq.c 	if (!e->sentinel)
e                 338 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 343 drivers/md/dm-cache-policy-smq.c 		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
e                 344 drivers/md/dm-cache-policy-smq.c 			if (e->sentinel) {
e                 351 drivers/md/dm-cache-policy-smq.c 			return e;
e                 359 drivers/md/dm-cache-policy-smq.c 	struct entry *e = q_peek(q, q->nr_levels, true);
e                 361 drivers/md/dm-cache-policy-smq.c 	if (e)
e                 362 drivers/md/dm-cache-policy-smq.c 		q_del(q, e);
e                 364 drivers/md/dm-cache-policy-smq.c 	return e;
e                 374 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 377 drivers/md/dm-cache-policy-smq.c 		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
e                 378 drivers/md/dm-cache-policy-smq.c 			if (!e->sentinel) {
e                 379 drivers/md/dm-cache-policy-smq.c 				l_del(q->es, q->qs + e->level, e);
e                 380 drivers/md/dm-cache-policy-smq.c 				return e;
e                 431 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 443 drivers/md/dm-cache-policy-smq.c 			e = __redist_pop_from(q, level + 1u);
e                 444 drivers/md/dm-cache-policy-smq.c 			if (!e) {
e                 449 drivers/md/dm-cache-policy-smq.c 			e->level = level;
e                 450 drivers/md/dm-cache-policy-smq.c 			l_add_tail(q->es, l, e);
e                 458 drivers/md/dm-cache-policy-smq.c 			e = l_pop_tail(q->es, l);
e                 460 drivers/md/dm-cache-policy-smq.c 			if (!e)
e                 464 drivers/md/dm-cache-policy-smq.c 			e->level = level + 1u;
e                 465 drivers/md/dm-cache-policy-smq.c 			l_add_tail(q->es, l_above, e);
e                 470 drivers/md/dm-cache-policy-smq.c static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
e                 475 drivers/md/dm-cache-policy-smq.c 	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
e                 478 drivers/md/dm-cache-policy-smq.c 	if (extra_levels && (e->level < q->nr_levels - 1u)) {
e                 484 drivers/md/dm-cache-policy-smq.c 			de->level = e->level;
e                 503 drivers/md/dm-cache-policy-smq.c 	q_del(q, e);
e                 504 drivers/md/dm-cache-policy-smq.c 	e->level = new_level;
e                 505 drivers/md/dm-cache-policy-smq.c 	q_push(q, e);
e                 611 drivers/md/dm-cache-policy-smq.c static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
e                 613 drivers/md/dm-cache-policy-smq.c 	return to_entry(ht->es, e->hash_next);
e                 616 drivers/md/dm-cache-policy-smq.c static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
e                 618 drivers/md/dm-cache-policy-smq.c 	e->hash_next = ht->buckets[bucket];
e                 619 drivers/md/dm-cache-policy-smq.c 	ht->buckets[bucket] = to_index(ht->es, e);
e                 622 drivers/md/dm-cache-policy-smq.c static void h_insert(struct smq_hash_table *ht, struct entry *e)
e                 624 drivers/md/dm-cache-policy-smq.c 	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
e                 625 drivers/md/dm-cache-policy-smq.c 	__h_insert(ht, h, e);
e                 631 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 634 drivers/md/dm-cache-policy-smq.c 	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
e                 635 drivers/md/dm-cache-policy-smq.c 		if (e->oblock == oblock)
e                 636 drivers/md/dm-cache-policy-smq.c 			return e;
e                 638 drivers/md/dm-cache-policy-smq.c 		*prev = e;
e                 645 drivers/md/dm-cache-policy-smq.c 		       struct entry *e, struct entry *prev)
e                 648 drivers/md/dm-cache-policy-smq.c 		prev->hash_next = e->hash_next;
e                 650 drivers/md/dm-cache-policy-smq.c 		ht->buckets[h] = e->hash_next;
e                 658 drivers/md/dm-cache-policy-smq.c 	struct entry *e, *prev;
e                 661 drivers/md/dm-cache-policy-smq.c 	e = __h_lookup(ht, h, oblock, &prev);
e                 662 drivers/md/dm-cache-policy-smq.c 	if (e && prev) {
e                 667 drivers/md/dm-cache-policy-smq.c 		__h_unlink(ht, h, e, prev);
e                 668 drivers/md/dm-cache-policy-smq.c 		__h_insert(ht, h, e);
e                 671 drivers/md/dm-cache-policy-smq.c 	return e;
e                 674 drivers/md/dm-cache-policy-smq.c static void h_remove(struct smq_hash_table *ht, struct entry *e)
e                 676 drivers/md/dm-cache-policy-smq.c 	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
e                 683 drivers/md/dm-cache-policy-smq.c 	e = __h_lookup(ht, h, e->oblock, &prev);
e                 684 drivers/md/dm-cache-policy-smq.c 	if (e)
e                 685 drivers/md/dm-cache-policy-smq.c 		__h_unlink(ht, h, e, prev);
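
h_lookup() above does a move-to-front on every hit: if the entry was found behind another one in its chain, __h_unlink()/__h_insert() splice it to the bucket head so hot blocks are found on the first probe next time. A pointer-based sketch of that policy (the types and the 256-bucket hash are stand-ins):

#include <stddef.h>
#include <stdint.h>

struct hentry {
	uint64_t key;
	struct hentry *hash_next;
};

struct htable {
	struct hentry *buckets[256];	/* start all NULL */
};

/* Find key in its chain; on a hit that was not already first, unlink
 * the entry and reinsert it at the bucket head. */
static struct hentry *ht_lookup(struct htable *ht, uint64_t key)
{
	unsigned h = key & 255;		/* stand-in for hash_64() */
	struct hentry *e, *prev = NULL;

	for (e = ht->buckets[h]; e; prev = e, e = e->hash_next) {
		if (e->key != key)
			continue;
		if (prev) {		/* move to front */
			prev->hash_next = e->hash_next;
			e->hash_next = ht->buckets[h];
			ht->buckets[h] = e;
		}
		return e;
	}
	return NULL;
}
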
e                 712 drivers/md/dm-cache-policy-smq.c static void init_entry(struct entry *e)
e                 718 drivers/md/dm-cache-policy-smq.c 	e->hash_next = INDEXER_NULL;
e                 719 drivers/md/dm-cache-policy-smq.c 	e->next = INDEXER_NULL;
e                 720 drivers/md/dm-cache-policy-smq.c 	e->prev = INDEXER_NULL;
e                 721 drivers/md/dm-cache-policy-smq.c 	e->level = 0u;
e                 722 drivers/md/dm-cache-policy-smq.c 	e->dirty = true;	/* FIXME: audit */
e                 723 drivers/md/dm-cache-policy-smq.c 	e->allocated = true;
e                 724 drivers/md/dm-cache-policy-smq.c 	e->sentinel = false;
e                 725 drivers/md/dm-cache-policy-smq.c 	e->pending_work = false;
e                 730 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                 735 drivers/md/dm-cache-policy-smq.c 	e = l_pop_head(ea->es, &ea->free);
e                 736 drivers/md/dm-cache-policy-smq.c 	init_entry(e);
e                 739 drivers/md/dm-cache-policy-smq.c 	return e;
e                 747 drivers/md/dm-cache-policy-smq.c 	struct entry *e = __get_entry(ea->es, ea->begin + i);
e                 749 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->allocated);
e                 751 drivers/md/dm-cache-policy-smq.c 	l_del(ea->es, &ea->free, e);
e                 752 drivers/md/dm-cache-policy-smq.c 	init_entry(e);
e                 755 drivers/md/dm-cache-policy-smq.c 	return e;
e                 758 drivers/md/dm-cache-policy-smq.c static void free_entry(struct entry_alloc *ea, struct entry *e)
e                 761 drivers/md/dm-cache-policy-smq.c 	BUG_ON(!e->allocated);
e                 764 drivers/md/dm-cache-policy-smq.c 	e->allocated = false;
e                 765 drivers/md/dm-cache-policy-smq.c 	l_add_tail(ea->es, &ea->free, e);
e                 773 drivers/md/dm-cache-policy-smq.c static unsigned get_index(struct entry_alloc *ea, struct entry *e)
e                 775 drivers/md/dm-cache-policy-smq.c 	return to_index(ea->es, e) - ea->begin;
e                 950 drivers/md/dm-cache-policy-smq.c static void del_queue(struct smq_policy *mq, struct entry *e)
e                 952 drivers/md/dm-cache-policy-smq.c 	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
e                 955 drivers/md/dm-cache-policy-smq.c static void push_queue(struct smq_policy *mq, struct entry *e)
e                 957 drivers/md/dm-cache-policy-smq.c 	if (e->dirty)
e                 958 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->dirty, e);
e                 960 drivers/md/dm-cache-policy-smq.c 		q_push(&mq->clean, e);
e                 964 drivers/md/dm-cache-policy-smq.c static void push(struct smq_policy *mq, struct entry *e)
e                 966 drivers/md/dm-cache-policy-smq.c 	h_insert(&mq->table, e);
e                 967 drivers/md/dm-cache-policy-smq.c 	if (!e->pending_work)
e                 968 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
e                 971 drivers/md/dm-cache-policy-smq.c static void push_queue_front(struct smq_policy *mq, struct entry *e)
e                 973 drivers/md/dm-cache-policy-smq.c 	if (e->dirty)
e                 974 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->dirty, e);
e                 976 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->clean, e);
e                 979 drivers/md/dm-cache-policy-smq.c static void push_front(struct smq_policy *mq, struct entry *e)
e                 981 drivers/md/dm-cache-policy-smq.c 	h_insert(&mq->table, e);
e                 982 drivers/md/dm-cache-policy-smq.c 	if (!e->pending_work)
e                 983 drivers/md/dm-cache-policy-smq.c 		push_queue_front(mq, e);
e                 986 drivers/md/dm-cache-policy-smq.c static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
e                 988 drivers/md/dm-cache-policy-smq.c 	return to_cblock(get_index(&mq->cache_alloc, e));
e                 991 drivers/md/dm-cache-policy-smq.c static void requeue(struct smq_policy *mq, struct entry *e)
e                 996 drivers/md/dm-cache-policy-smq.c 	if (e->pending_work)
e                 999 drivers/md/dm-cache-policy-smq.c 	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
e                1000 drivers/md/dm-cache-policy-smq.c 		if (!e->dirty) {
e                1001 drivers/md/dm-cache-policy-smq.c 			q_requeue(&mq->clean, e, 1u, NULL, NULL);
e                1005 drivers/md/dm-cache-policy-smq.c 		q_requeue(&mq->dirty, e, 1u,
e                1006 drivers/md/dm-cache-policy-smq.c 			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
e                1007 drivers/md/dm-cache-policy-smq.c 			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
e                1160 drivers/md/dm-cache-policy-smq.c static void mark_pending(struct smq_policy *mq, struct entry *e)
e                1162 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->sentinel);
e                1163 drivers/md/dm-cache-policy-smq.c 	BUG_ON(!e->allocated);
e                1164 drivers/md/dm-cache-policy-smq.c 	BUG_ON(e->pending_work);
e                1165 drivers/md/dm-cache-policy-smq.c 	e->pending_work = true;
e                1168 drivers/md/dm-cache-policy-smq.c static void clear_pending(struct smq_policy *mq, struct entry *e)
e                1170 drivers/md/dm-cache-policy-smq.c 	BUG_ON(!e->pending_work);
e                1171 drivers/md/dm-cache-policy-smq.c 	e->pending_work = false;
e                1178 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                1180 drivers/md/dm-cache-policy-smq.c 	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
e                1181 drivers/md/dm-cache-policy-smq.c 	if (e) {
e                1182 drivers/md/dm-cache-policy-smq.c 		mark_pending(mq, e);
e                1183 drivers/md/dm-cache-policy-smq.c 		q_del(&mq->dirty, e);
e                1186 drivers/md/dm-cache-policy-smq.c 		work.oblock = e->oblock;
e                1187 drivers/md/dm-cache-policy-smq.c 		work.cblock = infer_cblock(mq, e);
e                1191 drivers/md/dm-cache-policy-smq.c 			clear_pending(mq, e);
e                1192 drivers/md/dm-cache-policy-smq.c 			q_push_front(&mq->dirty, e);
e                1201 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                1206 drivers/md/dm-cache-policy-smq.c 	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
e                1207 drivers/md/dm-cache-policy-smq.c 	if (!e) {
e                1213 drivers/md/dm-cache-policy-smq.c 	mark_pending(mq, e);
e                1214 drivers/md/dm-cache-policy-smq.c 	q_del(&mq->clean, e);
e                1217 drivers/md/dm-cache-policy-smq.c 	work.oblock = e->oblock;
e                1218 drivers/md/dm-cache-policy-smq.c 	work.cblock = infer_cblock(mq, e);
e                1221 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
e                1222 drivers/md/dm-cache-policy-smq.c 		q_push_front(&mq->clean, e);
e                1230 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                1253 drivers/md/dm-cache-policy-smq.c 	e = alloc_entry(&mq->cache_alloc);
e                1254 drivers/md/dm-cache-policy-smq.c 	BUG_ON(!e);
e                1255 drivers/md/dm-cache-policy-smq.c 	e->pending_work = true;
e                1258 drivers/md/dm-cache-policy-smq.c 	work.cblock = infer_cblock(mq, e);
e                1261 drivers/md/dm-cache-policy-smq.c 		free_entry(&mq->cache_alloc, e);
e                1303 drivers/md/dm-cache-policy-smq.c 	struct entry *e = h_lookup(&mq->hotspot_table, hb);
e                1305 drivers/md/dm-cache-policy-smq.c 	if (e) {
e                1306 drivers/md/dm-cache-policy-smq.c 		stats_level_accessed(&mq->hotspot_stats, e->level);
e                1308 drivers/md/dm-cache-policy-smq.c 		hi = get_index(&mq->hotspot_alloc, e);
e                1309 drivers/md/dm-cache-policy-smq.c 		q_requeue(&mq->hotspot, e,
e                1317 drivers/md/dm-cache-policy-smq.c 		e = alloc_entry(&mq->hotspot_alloc);
e                1318 drivers/md/dm-cache-policy-smq.c 		if (!e) {
e                1319 drivers/md/dm-cache-policy-smq.c 			e = q_pop(&mq->hotspot);
e                1320 drivers/md/dm-cache-policy-smq.c 			if (e) {
e                1321 drivers/md/dm-cache-policy-smq.c 				h_remove(&mq->hotspot_table, e);
e                1322 drivers/md/dm-cache-policy-smq.c 				hi = get_index(&mq->hotspot_alloc, e);
e                1328 drivers/md/dm-cache-policy-smq.c 		if (e) {
e                1329 drivers/md/dm-cache-policy-smq.c 			e->oblock = hb;
e                1330 drivers/md/dm-cache-policy-smq.c 			q_push(&mq->hotspot, e);
e                1331 drivers/md/dm-cache-policy-smq.c 			h_insert(&mq->hotspot_table, e);
e                1335 drivers/md/dm-cache-policy-smq.c 	return e;
e                1369 drivers/md/dm-cache-policy-smq.c 	struct entry *e, *hs_e;
e                1374 drivers/md/dm-cache-policy-smq.c 	e = h_lookup(&mq->table, oblock);
e                1375 drivers/md/dm-cache-policy-smq.c 	if (e) {
e                1376 drivers/md/dm-cache-policy-smq.c 		stats_level_accessed(&mq->cache_stats, e->level);
e                1378 drivers/md/dm-cache-policy-smq.c 		requeue(mq, e);
e                1379 drivers/md/dm-cache-policy-smq.c 		*cblock = infer_cblock(mq, e);
e                1462 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc,
e                1468 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
e                1470 drivers/md/dm-cache-policy-smq.c 			e->oblock = work->oblock;
e                1471 drivers/md/dm-cache-policy-smq.c 			e->level = NR_CACHE_LEVELS - 1;
e                1472 drivers/md/dm-cache-policy-smq.c 			push(mq, e);
e                1475 drivers/md/dm-cache-policy-smq.c 			free_entry(&mq->cache_alloc, e);
e                1483 drivers/md/dm-cache-policy-smq.c 			h_remove(&mq->table, e);
e                1484 drivers/md/dm-cache-policy-smq.c 			free_entry(&mq->cache_alloc, e);
e                1487 drivers/md/dm-cache-policy-smq.c 			clear_pending(mq, e);
e                1488 drivers/md/dm-cache-policy-smq.c 			push_queue(mq, e);
e                1495 drivers/md/dm-cache-policy-smq.c 		clear_pending(mq, e);
e                1496 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
e                1519 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
e                1521 drivers/md/dm-cache-policy-smq.c 	if (e->pending_work)
e                1522 drivers/md/dm-cache-policy-smq.c 		e->dirty = set;
e                1524 drivers/md/dm-cache-policy-smq.c 		del_queue(mq, e);
e                1525 drivers/md/dm-cache-policy-smq.c 		e->dirty = set;
e                1526 drivers/md/dm-cache-policy-smq.c 		push_queue(mq, e);
e                1560 drivers/md/dm-cache-policy-smq.c 	struct entry *e;
e                1562 drivers/md/dm-cache-policy-smq.c 	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
e                1563 drivers/md/dm-cache-policy-smq.c 	e->oblock = oblock;
e                1564 drivers/md/dm-cache-policy-smq.c 	e->dirty = dirty;
e                1565 drivers/md/dm-cache-policy-smq.c 	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
e                1566 drivers/md/dm-cache-policy-smq.c 	e->pending_work = false;
e                1572 drivers/md/dm-cache-policy-smq.c 	push_front(mq, e);
e                1580 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
e                1582 drivers/md/dm-cache-policy-smq.c 	if (!e->allocated)
e                1586 drivers/md/dm-cache-policy-smq.c 	del_queue(mq, e);
e                1587 drivers/md/dm-cache-policy-smq.c 	h_remove(&mq->table, e);
e                1588 drivers/md/dm-cache-policy-smq.c 	free_entry(&mq->cache_alloc, e);
e                1595 drivers/md/dm-cache-policy-smq.c 	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
e                1597 drivers/md/dm-cache-policy-smq.c 	if (!e->allocated)
e                1600 drivers/md/dm-cache-policy-smq.c 	return e->level;
e                1102 drivers/md/dm-cache-target.c 				     dm_dblock_t *b, dm_dblock_t *e)
e                1110 drivers/md/dm-cache-target.c 		*e = *b;
e                1112 drivers/md/dm-cache-target.c 		*e = to_dblock(block_div(se, cache->discard_block_size));
e                1868 drivers/md/dm-cache-target.c 	dm_dblock_t b, e;
e                1873 drivers/md/dm-cache-target.c 	calc_discard_block_range(cache, bio, &b, &e);
e                1874 drivers/md/dm-cache-target.c 	while (b != e) {
e                2929 drivers/md/dm-cache-target.c 	sector_t b, e;
e                2938 drivers/md/dm-cache-target.c 	e = li->discard_end * li->block_size;
e                2944 drivers/md/dm-cache-target.c 	sector_div(e, li->cache->discard_block_size);
e                2950 drivers/md/dm-cache-target.c 	if (e > from_dblock(li->cache->discard_nr_blocks))
e                2951 drivers/md/dm-cache-target.c 		e = from_dblock(li->cache->discard_nr_blocks);
e                2953 drivers/md/dm-cache-target.c 	for (; b < e; b++)
e                3271 drivers/md/dm-cache-target.c 	uint64_t b, e;
e                3277 drivers/md/dm-cache-target.c 	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
e                3283 drivers/md/dm-cache-target.c 		result->end = to_cblock(e);
e                3307 drivers/md/dm-cache-target.c 	uint64_t e = from_cblock(range->end);
e                3316 drivers/md/dm-cache-target.c 	if (e > n) {
e                3318 drivers/md/dm-cache-target.c 		      cache_device_name(cache), e, n);
e                3322 drivers/md/dm-cache-target.c 	if (b >= e) {
e                3324 drivers/md/dm-cache-target.c 		      cache_device_name(cache), b, e);
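
The dm-cache-target range parser above leans on a classic sscanf trick: the trailing %c only converts if there is garbage after "B-E", so exactly two conversions means the string was clean; the following checks reject ranges that end past the device or are empty/inverted. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Parse "B-E" into an exclusive-end range within n blocks. */
static int parse_range(const char *str, unsigned long long n,
		       unsigned long long *begin, unsigned long long *end)
{
	unsigned long long b, e;
	char dummy;

	if (sscanf(str, "%llu-%llu%c", &b, &e, &dummy) != 2)
		return -1;	/* malformed, or trailing garbage hit %c */
	if (e > n)
		return -1;	/* range ends past the device */
	if (b >= e)
		return -1;	/* empty or inverted range */
	*begin = b;
	*end = e;
	return 0;
}
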
e                 691 drivers/md/dm-era-target.c 	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
e                 693 drivers/md/dm-era-target.c 	for (b = d->current_bit; b < e; b++) {
e                  67 drivers/md/dm-exception-store.h 				  struct dm_exception *e);
e                  73 drivers/md/dm-exception-store.h 				  struct dm_exception *e, int valid,
e                 147 drivers/md/dm-exception-store.h static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
e                 149 drivers/md/dm-exception-store.h 	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
e                 152 drivers/md/dm-exception-store.h static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
e                 154 drivers/md/dm-exception-store.h 	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
e                 156 drivers/md/dm-exception-store.h 	BUG_ON(!dm_consecutive_chunk_count(e));
e                 159 drivers/md/dm-exception-store.h static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
e                 161 drivers/md/dm-exception-store.h 	BUG_ON(!dm_consecutive_chunk_count(e));
e                 163 drivers/md/dm-exception-store.h 	e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
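
dm-exception-store.h packs a run length into the bits above DM_CHUNK_NUMBER_BITS of new_chunk, so one exception can describe many consecutive chunks. A sketch of that encoding (the 56-bit split is an assumption for illustration; the header's constant is authoritative):

#include <assert.h>
#include <stdint.h>

#define CHUNK_NUMBER_BITS 56		/* assumed split, for illustration */
#define CHUNK_NUMBER_MASK ((1ULL << CHUNK_NUMBER_BITS) - 1)

static uint64_t chunk_number(uint64_t chunk)
{
	return chunk & CHUNK_NUMBER_MASK;
}

static uint64_t consecutive_count(uint64_t chunk)
{
	return chunk >> CHUNK_NUMBER_BITS;
}

/* Grow the run by one chunk, as dm_consecutive_chunk_count_inc()
 * does above; the assert mirrors its BUG_ON overflow check. */
static uint64_t count_inc(uint64_t chunk)
{
	chunk += 1ULL << CHUNK_NUMBER_BITS;
	assert(consecutive_count(chunk));
	return chunk;
}
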
e                 627 drivers/md/dm-integrity.c 				 bool e, const char *function)
e                 630 drivers/md/dm-integrity.c 	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
e                 425 drivers/md/dm-snap-persistent.c 			    uint32_t index, struct core_exception *e)
e                 430 drivers/md/dm-snap-persistent.c 	de->old_chunk = cpu_to_le64(e->old_chunk);
e                 431 drivers/md/dm-snap-persistent.c 	de->new_chunk = cpu_to_le64(e->new_chunk);
e                 456 drivers/md/dm-snap-persistent.c 	struct core_exception e;
e                 462 drivers/md/dm-snap-persistent.c 		read_exception(ps, ps_area, i, &e);
e                 470 drivers/md/dm-snap-persistent.c 		if (e.new_chunk == 0LL) {
e                 479 drivers/md/dm-snap-persistent.c 		if (ps->next_free <= e.new_chunk)
e                 480 drivers/md/dm-snap-persistent.c 			ps->next_free = e.new_chunk + 1;
e                 485 drivers/md/dm-snap-persistent.c 		r = callback(callback_context, e.old_chunk, e.new_chunk);
e                 677 drivers/md/dm-snap-persistent.c 					struct dm_exception *e)
e                 686 drivers/md/dm-snap-persistent.c 	e->new_chunk = ps->next_free;
e                 700 drivers/md/dm-snap-persistent.c 					struct dm_exception *e, int valid,
e                 712 drivers/md/dm-snap-persistent.c 	ce.old_chunk = e->old_chunk;
e                 713 drivers/md/dm-snap-persistent.c 	ce.new_chunk = e->new_chunk;
e                  40 drivers/md/dm-snap-transient.c 				       struct dm_exception *e)
e                  48 drivers/md/dm-snap-transient.c 	e->new_chunk = sector_to_chunk(store, tc->next_free);
e                  55 drivers/md/dm-snap-transient.c 				       struct dm_exception *e, int valid,
e                 199 drivers/md/dm-snap.c 	struct dm_exception e;
e                 695 drivers/md/dm-snap.c static void dm_remove_exception(struct dm_exception *e)
e                 697 drivers/md/dm-snap.c 	hlist_bl_del(&e->hash_list);
e                 709 drivers/md/dm-snap.c 	struct dm_exception *e;
e                 712 drivers/md/dm-snap.c 	hlist_bl_for_each_entry(e, pos, slot, hash_list)
e                 713 drivers/md/dm-snap.c 		if (chunk >= e->old_chunk &&
e                 714 drivers/md/dm-snap.c 		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
e                 715 drivers/md/dm-snap.c 			return e;
e                 722 drivers/md/dm-snap.c 	struct dm_exception *e;
e                 724 drivers/md/dm-snap.c 	e = kmem_cache_alloc(exception_cache, gfp);
e                 725 drivers/md/dm-snap.c 	if (!e && gfp == GFP_NOIO)
e                 726 drivers/md/dm-snap.c 		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
e                 728 drivers/md/dm-snap.c 	return e;
e                 731 drivers/md/dm-snap.c static void free_completed_exception(struct dm_exception *e)
e                 733 drivers/md/dm-snap.c 	kmem_cache_free(exception_cache, e);
e                 761 drivers/md/dm-snap.c 	struct dm_exception *e = NULL;
e                 770 drivers/md/dm-snap.c 	hlist_bl_for_each_entry(e, pos, l, hash_list) {
e                 772 drivers/md/dm-snap.c 		if (new_e->old_chunk == (e->old_chunk +
e                 773 drivers/md/dm-snap.c 					 dm_consecutive_chunk_count(e) + 1) &&
e                 774 drivers/md/dm-snap.c 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
e                 775 drivers/md/dm-snap.c 					 dm_consecutive_chunk_count(e) + 1)) {
e                 776 drivers/md/dm-snap.c 			dm_consecutive_chunk_count_inc(e);
e                 782 drivers/md/dm-snap.c 		if (new_e->old_chunk == (e->old_chunk - 1) &&
e                 783 drivers/md/dm-snap.c 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
e                 784 drivers/md/dm-snap.c 			dm_consecutive_chunk_count_inc(e);
e                 785 drivers/md/dm-snap.c 			e->old_chunk--;
e                 786 drivers/md/dm-snap.c 			e->new_chunk--;
e                 791 drivers/md/dm-snap.c 		if (new_e->old_chunk < e->old_chunk)
e                 796 drivers/md/dm-snap.c 	if (!e) {
e                 802 drivers/md/dm-snap.c 	} else if (new_e->old_chunk < e->old_chunk) {
e                 804 drivers/md/dm-snap.c 		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
e                 807 drivers/md/dm-snap.c 		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
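
dm_insert_exception() above grows an existing run instead of inserting a new entry when the fresh 1-chunk mapping is consecutive with a neighbour on both the old and new side. The core test, reduced to a sketch with an explicit count field (the real code packs the count into new_chunk as shown earlier):

#include <stdbool.h>
#include <stdint.h>

/* An exception maps count + 1 consecutive old chunks to consecutive
 * new chunks. */
struct exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
	uint64_t count;		/* run length minus one */
};

/* Absorb (old_c -> new_c) into e if it extends the run at either end. */
static bool try_merge(struct exception *e, uint64_t old_c, uint64_t new_c)
{
	if (old_c == e->old_chunk + e->count + 1 &&
	    new_c == e->new_chunk + e->count + 1) {
		e->count++;		/* extends the run at the back */
		return true;
	}
	if (old_c == e->old_chunk - 1 && new_c == e->new_chunk - 1) {
		e->count++;		/* extends the run at the front */
		e->old_chunk--;
		e->new_chunk--;
		return true;
	}
	return false;
}
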
e                 819 drivers/md/dm-snap.c 	struct dm_exception *e;
e                 821 drivers/md/dm-snap.c 	e = alloc_completed_exception(GFP_KERNEL);
e                 822 drivers/md/dm-snap.c 	if (!e)
e                 825 drivers/md/dm-snap.c 	e->old_chunk = old;
e                 828 drivers/md/dm-snap.c 	e->new_chunk = new;
e                 839 drivers/md/dm-snap.c 	dm_insert_exception(&s->complete, e);
e                 935 drivers/md/dm-snap.c 	struct dm_exception *e;
e                 937 drivers/md/dm-snap.c 	e = dm_lookup_exception(&s->complete, old_chunk);
e                 938 drivers/md/dm-snap.c 	if (!e) {
e                 948 drivers/md/dm-snap.c 	if (!dm_consecutive_chunk_count(e)) {
e                 949 drivers/md/dm-snap.c 		dm_remove_exception(e);
e                 950 drivers/md/dm-snap.c 		free_completed_exception(e);
e                 962 drivers/md/dm-snap.c 	if (old_chunk == e->old_chunk) {
e                 963 drivers/md/dm-snap.c 		e->old_chunk++;
e                 964 drivers/md/dm-snap.c 		e->new_chunk++;
e                 965 drivers/md/dm-snap.c 	} else if (old_chunk != e->old_chunk +
e                 966 drivers/md/dm-snap.c 		   dm_consecutive_chunk_count(e)) {
e                 970 drivers/md/dm-snap.c 		      (unsigned long long)e->old_chunk,
e                 972 drivers/md/dm-snap.c 		      e->old_chunk + dm_consecutive_chunk_count(e));
e                 976 drivers/md/dm-snap.c 	dm_consecutive_chunk_count_dec(e);
e                1639 drivers/md/dm-snap.c 	struct dm_exception *e;
e                1647 drivers/md/dm-snap.c 	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
e                1658 drivers/md/dm-snap.c 	e = alloc_completed_exception(GFP_NOIO);
e                1659 drivers/md/dm-snap.c 	if (!e) {
e                1666 drivers/md/dm-snap.c 	*e = pe->e;
e                1672 drivers/md/dm-snap.c 		free_completed_exception(e);
e                1685 drivers/md/dm-snap.c 	dm_insert_exception(&s->complete, e);
e                1689 drivers/md/dm-snap.c 	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
e                1691 drivers/md/dm-snap.c 		__check_for_conflicting_io(s, pe->e.old_chunk);
e                1697 drivers/md/dm-snap.c 	dm_remove_exception(&pe->e);
e                1729 drivers/md/dm-snap.c 	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
e                1797 drivers/md/dm-snap.c 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
e                1801 drivers/md/dm-snap.c 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
e                1838 drivers/md/dm-snap.c 	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
e                1840 drivers/md/dm-snap.c 	if (!e)
e                1843 drivers/md/dm-snap.c 	return container_of(e, struct dm_snap_pending_exception, e);
e                1856 drivers/md/dm-snap.c 	pe->e.old_chunk = chunk;
e                1863 drivers/md/dm-snap.c 	if (s->store->type->prepare_exception(s->store, &pe->e)) {
e                1872 drivers/md/dm-snap.c 	dm_insert_exception(&s->pending, &pe->e);
e                1900 drivers/md/dm-snap.c static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
e                1905 drivers/md/dm-snap.c 		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
e                1906 drivers/md/dm-snap.c 				(chunk - e->old_chunk)) +
e                1920 drivers/md/dm-snap.c static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
e                1943 drivers/md/dm-snap.c 	struct dm_exception *e;
e                1995 drivers/md/dm-snap.c 	e = dm_lookup_exception(&s->complete, chunk);
e                1996 drivers/md/dm-snap.c 	if (e) {
e                1997 drivers/md/dm-snap.c 		remap_exception(s, e, bio, chunk);
e                2002 drivers/md/dm-snap.c 			zero_exception(s, e, bio, chunk);
e                2031 drivers/md/dm-snap.c 			e = dm_lookup_exception(&s->complete, chunk);
e                2032 drivers/md/dm-snap.c 			if (e) {
e                2034 drivers/md/dm-snap.c 				remap_exception(s, e, bio, chunk);
e                2059 drivers/md/dm-snap.c 		remap_exception(s, &pe->e, bio, chunk);
e                2111 drivers/md/dm-snap.c 	struct dm_exception *e;
e                2141 drivers/md/dm-snap.c 	e = dm_lookup_exception(&s->complete, chunk);
e                2142 drivers/md/dm-snap.c 	if (e) {
e                2154 drivers/md/dm-snap.c 		remap_exception(s, e, bio, chunk);
e                2424 drivers/md/dm-snap.c 	struct dm_exception *e;
e                2465 drivers/md/dm-snap.c 			e = dm_lookup_exception(&snap->complete, chunk);
e                2466 drivers/md/dm-snap.c 			if (e)
e                2476 drivers/md/dm-snap.c 				e = dm_lookup_exception(&snap->complete, chunk);
e                2477 drivers/md/dm-snap.c 				if (e) {
e                1757 drivers/md/dm-thin-metadata.c int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
e                1762 drivers/md/dm-thin-metadata.c 	for (; b != e; b++) {
e                1772 drivers/md/dm-thin-metadata.c int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
e                1777 drivers/md/dm-thin-metadata.c 	for (; b != e; b++) {
e                 200 drivers/md/dm-thin-metadata.h int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
e                 201 drivers/md/dm-thin-metadata.h int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
e                 121 drivers/md/dm-thin.c 		      dm_block_t b, dm_block_t e, struct dm_cell_key *key)
e                 126 drivers/md/dm-thin.c 	key->block_end = e;
e                 701 drivers/md/dm-thin.c 	sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
e                 707 drivers/md/dm-thin.c 		e >>= pool->sectors_per_block_shift;
e                 710 drivers/md/dm-thin.c 		(void) sector_div(e, pool->sectors_per_block);
e                 713 drivers/md/dm-thin.c 	if (e < b)
e                 715 drivers/md/dm-thin.c 		e = b;
e                 718 drivers/md/dm-thin.c 	*end = e;
e                1090 drivers/md/dm-thin.c 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
e                1109 drivers/md/dm-thin.c 		for (e = b + 1; e != end; e++) {
e                1110 drivers/md/dm-thin.c 			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
e                1118 drivers/md/dm-thin.c 		r = issue_discard(&op, b, e);
e                1122 drivers/md/dm-thin.c 		b = e;
e                 200 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                 349 drivers/md/dm-writecache.c static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
e                 351 drivers/md/dm-writecache.c 	return &sb(wc)->entries[e->index];
e                 354 drivers/md/dm-writecache.c static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
e                 356 drivers/md/dm-writecache.c 	return (char *)wc->block_start + (e->index << wc->block_size_bits);
e                 359 drivers/md/dm-writecache.c static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
e                 362 drivers/md/dm-writecache.c 		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
e                 365 drivers/md/dm-writecache.c static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
e                 368 drivers/md/dm-writecache.c 	return e->original_sector;
e                 370 drivers/md/dm-writecache.c 	return le64_to_cpu(memory_entry(wc, e)->original_sector);
e                 374 drivers/md/dm-writecache.c static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
e                 377 drivers/md/dm-writecache.c 	return e->seq_count;
e                 379 drivers/md/dm-writecache.c 	return le64_to_cpu(memory_entry(wc, e)->seq_count);
e                 383 drivers/md/dm-writecache.c static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
e                 386 drivers/md/dm-writecache.c 	e->seq_count = -1;
e                 388 drivers/md/dm-writecache.c 	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
e                 391 drivers/md/dm-writecache.c static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
e                 396 drivers/md/dm-writecache.c 	e->original_sector = original_sector;
e                 397 drivers/md/dm-writecache.c 	e->seq_count = seq_count;
e                 401 drivers/md/dm-writecache.c 	pmem_assign(*memory_entry(wc, e), me);
e                 540 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                 547 drivers/md/dm-writecache.c 		e = container_of(node, struct wc_entry, rb_node);
e                 548 drivers/md/dm-writecache.c 		if (read_original_sector(wc, e) == block)
e                 551 drivers/md/dm-writecache.c 		node = (read_original_sector(wc, e) >= block ?
e                 552 drivers/md/dm-writecache.c 			e->rb_node.rb_left : e->rb_node.rb_right);
e                 556 drivers/md/dm-writecache.c 			if (read_original_sector(wc, e) >= block) {
e                 557 drivers/md/dm-writecache.c 				return e;
e                 559 drivers/md/dm-writecache.c 				node = rb_next(&e->rb_node);
e                 562 drivers/md/dm-writecache.c 				e = container_of(node, struct wc_entry, rb_node);
e                 563 drivers/md/dm-writecache.c 				return e;
e                 571 drivers/md/dm-writecache.c 			node = rb_prev(&e->rb_node);
e                 573 drivers/md/dm-writecache.c 			node = rb_next(&e->rb_node);
e                 575 drivers/md/dm-writecache.c 			return e;
e                 578 drivers/md/dm-writecache.c 			return e;
e                 579 drivers/md/dm-writecache.c 		e = e2;
e                 585 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                 589 drivers/md/dm-writecache.c 		e = container_of(*node, struct wc_entry, rb_node);
e                 590 drivers/md/dm-writecache.c 		parent = &e->rb_node;
e                 591 drivers/md/dm-writecache.c 		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
e                 601 drivers/md/dm-writecache.c static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
e                 603 drivers/md/dm-writecache.c 	list_del(&e->lru);
e                 604 drivers/md/dm-writecache.c 	rb_erase(&e->rb_node, &wc->tree);
e                 607 drivers/md/dm-writecache.c static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
e                 612 drivers/md/dm-writecache.c 			wc->current_free = e;
e                 615 drivers/md/dm-writecache.c 			if (&e->rb_node < *node)
e                 620 drivers/md/dm-writecache.c 		rb_link_node(&e->rb_node, parent, node);
e                 621 drivers/md/dm-writecache.c 		rb_insert_color(&e->rb_node, &wc->freetree);
e                 623 drivers/md/dm-writecache.c 		list_add_tail(&e->lru, &wc->freelist);
e                 636 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                 642 drivers/md/dm-writecache.c 		e = wc->current_free;
e                 643 drivers/md/dm-writecache.c 		next = rb_next(&e->rb_node);
e                 644 drivers/md/dm-writecache.c 		rb_erase(&e->rb_node, &wc->freetree);
e                 651 drivers/md/dm-writecache.c 		e = container_of(wc->freelist.next, struct wc_entry, lru);
e                 652 drivers/md/dm-writecache.c 		list_del(&e->lru);
e                 658 drivers/md/dm-writecache.c 	return e;
e                 661 drivers/md/dm-writecache.c static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
e                 663 drivers/md/dm-writecache.c 	writecache_unlink(wc, e);
e                 664 drivers/md/dm-writecache.c 	writecache_add_to_freelist(wc, e);
e                 665 drivers/md/dm-writecache.c 	clear_seq_count(wc, e);
e                 666 drivers/md/dm-writecache.c 	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
e                 694 drivers/md/dm-writecache.c static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
e                 696 drivers/md/dm-writecache.c 	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
e                 698 drivers/md/dm-writecache.c 		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
e                 701 drivers/md/dm-writecache.c static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
e                 703 drivers/md/dm-writecache.c 	return read_seq_count(wc, e) < wc->seq_count;
e                 708 drivers/md/dm-writecache.c 	struct wc_entry *e, *e2;
e                 717 drivers/md/dm-writecache.c 	e = container_of(wc->lru.next, struct wc_entry, lru);
e                 718 drivers/md/dm-writecache.c 	if (writecache_entry_is_committed(wc, e)) {
e                 727 drivers/md/dm-writecache.c 		writecache_flush_entry(wc, e);
e                 728 drivers/md/dm-writecache.c 		if (unlikely(e->lru.next == &wc->lru))
e                 730 drivers/md/dm-writecache.c 		e2 = container_of(e->lru.next, struct wc_entry, lru);
e                 733 drivers/md/dm-writecache.c 		e = e2;
e                 748 drivers/md/dm-writecache.c 		struct rb_node *rb_node = rb_prev(&e->rb_node);
e                 752 drivers/md/dm-writecache.c 			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
e                 758 drivers/md/dm-writecache.c 		if (unlikely(e->lru.prev == &wc->lru))
e                 760 drivers/md/dm-writecache.c 		e = container_of(e->lru.prev, struct wc_entry, lru);
e                 792 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                 795 drivers/md/dm-writecache.c 	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
e                 796 drivers/md/dm-writecache.c 	if (unlikely(!e))
e                 799 drivers/md/dm-writecache.c 	while (read_original_sector(wc, e) < end) {
e                 800 drivers/md/dm-writecache.c 		struct rb_node *node = rb_next(&e->rb_node);
e                 802 drivers/md/dm-writecache.c 		if (likely(!e->write_in_progress)) {
e                 808 drivers/md/dm-writecache.c 			writecache_free_entry(wc, e);
e                 814 drivers/md/dm-writecache.c 		e = container_of(node, struct wc_entry, rb_node);
e                 872 drivers/md/dm-writecache.c 		struct wc_entry *e = &wc->entries[b];
e                 873 drivers/md/dm-writecache.c 		e->index = b;
e                 874 drivers/md/dm-writecache.c 		e->write_in_progress = false;
e                 941 drivers/md/dm-writecache.c 		struct wc_entry *e = &wc->entries[b];
e                 944 drivers/md/dm-writecache.c 			e->original_sector = -1;
e                 945 drivers/md/dm-writecache.c 			e->seq_count = -1;
e                 948 drivers/md/dm-writecache.c 		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
e                 952 drivers/md/dm-writecache.c 			e->original_sector = -1;
e                 953 drivers/md/dm-writecache.c 			e->seq_count = -1;
e                 955 drivers/md/dm-writecache.c 			e->original_sector = le64_to_cpu(wme.original_sector);
e                 956 drivers/md/dm-writecache.c 			e->seq_count = le64_to_cpu(wme.seq_count);
e                 962 drivers/md/dm-writecache.c 		struct wc_entry *e = &wc->entries[b];
e                 963 drivers/md/dm-writecache.c 		if (!writecache_entry_is_committed(wc, e)) {
e                 964 drivers/md/dm-writecache.c 			if (read_seq_count(wc, e) != -1) {
e                 966 drivers/md/dm-writecache.c 				clear_seq_count(wc, e);
e                 969 drivers/md/dm-writecache.c 			writecache_add_to_freelist(wc, e);
e                 973 drivers/md/dm-writecache.c 			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
e                 975 drivers/md/dm-writecache.c 				writecache_insert_entry(wc, e);
e                 977 drivers/md/dm-writecache.c 				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
e                 980 drivers/md/dm-writecache.c 						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
e                 981 drivers/md/dm-writecache.c 						 (unsigned long long)read_seq_count(wc, e));
e                 983 drivers/md/dm-writecache.c 				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
e                 987 drivers/md/dm-writecache.c 					writecache_insert_entry(wc, e);
e                1151 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                1196 drivers/md/dm-writecache.c 		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
e                1197 drivers/md/dm-writecache.c 		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
e                1199 drivers/md/dm-writecache.c 				bio_copy_block(wc, bio, memory_data(wc, e));
e                1206 drivers/md/dm-writecache.c 				bio->bi_iter.bi_sector = cache_sector(wc, e);
e                1207 drivers/md/dm-writecache.c 				if (!writecache_entry_is_committed(wc, e))
e                1212 drivers/md/dm-writecache.c 			if (e) {
e                1214 drivers/md/dm-writecache.c 					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
e                1225 drivers/md/dm-writecache.c 			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
e                1226 drivers/md/dm-writecache.c 			if (e) {
e                1227 drivers/md/dm-writecache.c 				if (!writecache_entry_is_committed(wc, e))
e                1229 drivers/md/dm-writecache.c 				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
e                1234 drivers/md/dm-writecache.c 			e = writecache_pop_from_freelist(wc);
e                1235 drivers/md/dm-writecache.c 			if (unlikely(!e)) {
e                1239 drivers/md/dm-writecache.c 			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
e                1240 drivers/md/dm-writecache.c 			writecache_insert_entry(wc, e);
e                1244 drivers/md/dm-writecache.c 				bio_copy_block(wc, bio, memory_data(wc, e));
e                1248 drivers/md/dm-writecache.c 				bio->bi_iter.bi_sector = cache_sector(wc, e);
e                1361 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                1373 drivers/md/dm-writecache.c 			e = wb->wc_list[i];
e                1374 drivers/md/dm-writecache.c 			BUG_ON(!e->write_in_progress);
e                1375 drivers/md/dm-writecache.c 			e->write_in_progress = false;
e                1376 drivers/md/dm-writecache.c 			INIT_LIST_HEAD(&e->lru);
e                1378 drivers/md/dm-writecache.c 				writecache_free_entry(wc, e);
e                1399 drivers/md/dm-writecache.c 	struct wc_entry *e;
e                1408 drivers/md/dm-writecache.c 		e = c->e;
e                1410 drivers/md/dm-writecache.c 			BUG_ON(!e->write_in_progress);
e                1411 drivers/md/dm-writecache.c 			e->write_in_progress = false;
e                1412 drivers/md/dm-writecache.c 			INIT_LIST_HEAD(&e->lru);
e                1414 drivers/md/dm-writecache.c 				writecache_free_entry(wc, e);
e                1418 drivers/md/dm-writecache.c 			e++;
e                1472 drivers/md/dm-writecache.c static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
e                1476 drivers/md/dm-writecache.c 	void *address = memory_data(wc, e);
e                1503 drivers/md/dm-writecache.c 	struct wc_entry *e, *f;
e                1510 drivers/md/dm-writecache.c 		e = container_of(wbl->list.prev, struct wc_entry, lru);
e                1511 drivers/md/dm-writecache.c 		list_del(&e->lru);
e                1513 drivers/md/dm-writecache.c 		max_pages = e->wc_list_contiguous;
e                1520 drivers/md/dm-writecache.c 		bio->bi_iter.bi_sector = read_original_sector(wc, e);
e                1529 drivers/md/dm-writecache.c 		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
e                1531 drivers/md/dm-writecache.c 		wb->wc_list[0] = e;
e                1537 drivers/md/dm-writecache.c 			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
e                1544 drivers/md/dm-writecache.c 			e = f;
e                1560 drivers/md/dm-writecache.c 	struct wc_entry *e, *f;
e                1568 drivers/md/dm-writecache.c 		e = container_of(wbl->list.prev, struct wc_entry, lru);
e                1569 drivers/md/dm-writecache.c 		list_del(&e->lru);
e                1571 drivers/md/dm-writecache.c 		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
e                1574 drivers/md/dm-writecache.c 		from.sector = cache_sector(wc, e);
e                1577 drivers/md/dm-writecache.c 		to.sector = read_original_sector(wc, e);
e                1582 drivers/md/dm-writecache.c 		c->e = e;
e                1583 drivers/md/dm-writecache.c 		c->n_entries = e->wc_list_contiguous;
e                1588 drivers/md/dm-writecache.c 			BUG_ON(f != e + 1);
e                1590 drivers/md/dm-writecache.c 			e = f;
e                1603 drivers/md/dm-writecache.c 	struct wc_entry *f, *g, *e = NULL;
e                1641 drivers/md/dm-writecache.c 			if (unlikely(!e)) {
e                1643 drivers/md/dm-writecache.c 				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
e                1645 drivers/md/dm-writecache.c 				e = g;
e                1647 drivers/md/dm-writecache.c 			e = container_of(wc->lru.prev, struct wc_entry, lru);
e                1648 drivers/md/dm-writecache.c 		BUG_ON(e->write_in_progress);
e                1649 drivers/md/dm-writecache.c 		if (unlikely(!writecache_entry_is_committed(wc, e))) {
e                1652 drivers/md/dm-writecache.c 		node = rb_prev(&e->rb_node);
e                1656 drivers/md/dm-writecache.c 				     read_original_sector(wc, e))) {
e                1658 drivers/md/dm-writecache.c 				list_del(&e->lru);
e                1659 drivers/md/dm-writecache.c 				list_add(&e->lru, &skipped);
e                1665 drivers/md/dm-writecache.c 		list_del(&e->lru);
e                1666 drivers/md/dm-writecache.c 		list_add(&e->lru, &wbl.list);
e                1668 drivers/md/dm-writecache.c 		e->write_in_progress = true;
e                1669 drivers/md/dm-writecache.c 		e->wc_list_contiguous = 1;
e                1671 drivers/md/dm-writecache.c 		f = e;
e                1707 drivers/md/dm-writecache.c 			e->wc_list_contiguous++;
e                1708 drivers/md/dm-writecache.c 			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
e                1752 drivers/md/dm-writecache.c 	struct wc_entry e;
e                1772 drivers/md/dm-writecache.c 	e.index = n_blocks;
e                1773 drivers/md/dm-writecache.c 	if (e.index != n_blocks)
e                1355 drivers/md/dm-zoned-metadata.c 	unsigned int i = 0, e = 0, chunk = 0;
e                1375 drivers/md/dm-zoned-metadata.c 			e = 0;
e                1379 drivers/md/dm-zoned-metadata.c 		dzone_id = le32_to_cpu(dmap[e].dzone_id);
e                1400 drivers/md/dm-zoned-metadata.c 		bzone_id = le32_to_cpu(dmap[e].bzone_id);
e                1426 drivers/md/dm-zoned-metadata.c 		e++;
e                1427 drivers/md/dm-zoned-metadata.c 		if (e >= DMZ_MAP_ENTRIES)
e                1691 drivers/md/md-bitmap.c void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
e                1695 drivers/md/md-bitmap.c 	for (chunk = s; chunk <= e; chunk++) {
e                 251 drivers/md/md-bitmap.h void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
e                4434 drivers/md/md.c 	char *e;
e                4435 drivers/md/md.c 	int major = simple_strtoul(buf, &e, 10);
e                4441 drivers/md/md.c 	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
e                4443 drivers/md/md.c 	minor = simple_strtoul(e+1, &e, 10);
e                4444 drivers/md/md.c 	if (*e && *e != '\n')
e                4588 drivers/md/md.c 	char *e;
e                4626 drivers/md/md.c 	major = simple_strtoul(buf, &e, 10);
e                4628 drivers/md/md.c 	if (e==buf || *e != '.')
e                4630 drivers/md/md.c 	buf = e+1;
e                4631 drivers/md/md.c 	minor = simple_strtoul(buf, &e, 10);
e                4632 drivers/md/md.c 	if (e==buf || (*e && *e != '\n') )
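
The two md.c sysfs parsers above share one validation idiom: parse the first number with simple_strtoul(), use the end pointer e to demand the expected separator (':' or '.'), parse the second number from e+1, and reject anything but an optional trailing newline. A minimal userspace sketch of the same checks, using standard strtoul() (the helper name and return convention are illustrative, not taken from md.c):

#include <errno.h>
#include <stdlib.h>

/* Hypothetical helper mirroring the end-pointer checks in md.c:
 * accepts "MAJOR:MINOR" with an optional trailing newline. */
static int parse_major_minor(const char *buf,
			     unsigned long *major, unsigned long *minor)
{
	char *e;

	*major = strtoul(buf, &e, 10);
	if (e == buf || *e != ':')	/* no digits, or wrong separator */
		return -EINVAL;
	*minor = strtoul(e + 1, &e, 10);
	if (*e && *e != '\n')		/* trailing junk after the number */
		return -EINVAL;
	return 0;
}
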
e                 269 drivers/md/raid5-ppl.c 	struct ppl_header_entry *e = NULL;
e                 332 drivers/md/raid5-ppl.c 			e = last;
e                 335 drivers/md/raid5-ppl.c 	if (!e) {
e                 336 drivers/md/raid5-ppl.c 		e = &pplhdr->entries[io->entries_count++];
e                 337 drivers/md/raid5-ppl.c 		e->data_sector = cpu_to_le64(data_sector);
e                 338 drivers/md/raid5-ppl.c 		e->parity_disk = cpu_to_le32(sh->pd_idx);
e                 339 drivers/md/raid5-ppl.c 		e->checksum = cpu_to_le32(~0);
e                 342 drivers/md/raid5-ppl.c 	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
e                 346 drivers/md/raid5-ppl.c 		le32_add_cpu(&e->pp_size, PAGE_SIZE);
e                 348 drivers/md/raid5-ppl.c 		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
e                 446 drivers/md/raid5-ppl.c 		struct ppl_header_entry *e = &pplhdr->entries[i];
e                 449 drivers/md/raid5-ppl.c 			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
e                 450 drivers/md/raid5-ppl.c 			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
e                 452 drivers/md/raid5-ppl.c 		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
e                 454 drivers/md/raid5-ppl.c 		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
e                 795 drivers/md/raid5-ppl.c static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
e                 811 drivers/md/raid5-ppl.c 	unsigned int pp_size = le32_to_cpu(e->pp_size);
e                 812 drivers/md/raid5-ppl.c 	unsigned int data_size = le32_to_cpu(e->data_size);
e                 822 drivers/md/raid5-ppl.c 	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
e                 945 drivers/md/raid5-ppl.c 		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
e                 988 drivers/md/raid5-ppl.c 		struct ppl_header_entry *e = &pplhdr->entries[i];
e                 989 drivers/md/raid5-ppl.c 		u32 pp_size = le32_to_cpu(e->pp_size);
e                 999 drivers/md/raid5-ppl.c 		crc_stored = le32_to_cpu(e->checksum);
e                1030 drivers/md/raid5-ppl.c 			ret = ppl_recover_entry(log, e, ppl_sector);
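
The raid5-ppl entries above follow the usual CRC32C framing convention: the checksum is seeded with ~0 when the entry is created, each data page is folded in with crc32c_le(), and the stored value is the bitwise inverse of the accumulator. A hedged sketch of that accumulate-then-invert flow (the crc32c() prototype stands in for the kernel's crc32c_le(); the buffer plumbing is illustrative):

#include <stddef.h>
#include <stdint.h>

uint32_t crc32c(uint32_t crc, const void *buf, size_t len); /* assumed: crc32c_le() equivalent */

/* Seed with all-ones, fold in each page, invert once at finalize time. */
static uint32_t ppl_style_checksum(const void *const pages[],
				   const size_t lens[], int n)
{
	uint32_t crc = ~0u;
	int i;

	for (i = 0; i < n; i++)
		crc = crc32c(crc, pages[i], lens[i]);
	return ~crc;
}
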
e                 312 drivers/media/cec/cec-pin-error-inj.c 		u64 e = pin->error_inj[i];
e                 322 drivers/media/cec/cec-pin-error-inj.c 			mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
e                 161 drivers/media/cec/cec-pin.c 	u64 e = pin->error_inj[cmd];
e                 162 drivers/media/cec/cec-pin.c 	unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
e                 223 drivers/media/cec/cec-pin.c 	u64 e = pin->error_inj[cmd];
e                 224 drivers/media/cec/cec-pin.c 	unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
e                 242 drivers/media/dvb-core/dvb_frontend.c 	struct dvb_frontend_event *e;
e                 258 drivers/media/dvb-core/dvb_frontend.c 	e = &events->events[events->eventw];
e                 259 drivers/media/dvb-core/dvb_frontend.c 	e->status = status;
e                 260 drivers/media/dvb-core/dvb_frontend.c 	e->parameters = fepriv->parameters_out;
e                9476 drivers/media/dvb-frontends/drx39xyj/drxj.c 	u32 e = 0;		/* exponent value used for QAM BER/SER */
e                9564 drivers/media/dvb-frontends/drx39xyj/drxj.c 	e = (qsym_err_vd & QAM_VD_NR_QSYM_ERRORS_EXP__M) >>
e                9569 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if ((m << e) >> 3 > 549752)
e                9570 drivers/media/dvb-frontends/drx39xyj/drxj.c 		qam_vd_ser = 500000 * vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
e                9572 drivers/media/dvb-frontends/drx39xyj/drxj.c 		qam_vd_ser = m << ((e > 2) ? (e - 3) : e);
e                9585 drivers/media/dvb-frontends/drx39xyj/drxj.c 	e = (pre_bit_err_rs & FEC_RS_NR_BIT_ERRORS_EXP__M) >>
e                9590 drivers/media/dvb-frontends/drx39xyj/drxj.c 	ber_cnt = m << e;
e                9593 drivers/media/dvb-frontends/drx39xyj/drxj.c 	if (m > (rs_bit_cnt >> (e + 1)) || (rs_bit_cnt >> e) == 0)
e                9594 drivers/media/dvb-frontends/drx39xyj/drxj.c 		qam_pre_rs_ber = 500000 * rs_bit_cnt >> e;
e                9612 drivers/media/dvb-frontends/drx39xyj/drxj.c 		e = post_bit_err_rs * 742686;
e                9614 drivers/media/dvb-frontends/drx39xyj/drxj.c 		qam_post_rs_ber = e / m;
e                9628 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_count.stat[0].uvalue += vd_bit_cnt * ((e > 2) ? 1 : 8) / 8;
e                9631 drivers/media/dvb-frontends/drx39xyj/drxj.c 		p->pre_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
e                9635 drivers/media/dvb-frontends/drx39xyj/drxj.c 	p->post_bit_count.stat[0].uvalue += rs_bit_cnt >> e;
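
The drxj BER lines above decode counters that the demodulator reports as a mantissa/exponent pair: the raw count is reconstructed as m << e, with guard checks before large shifts so the derived rates cannot overflow. A minimal sketch of unpacking such a register (the field masks and shift are illustrative placeholders, not the DRX-J register layout):

#include <stdint.h>

#define CNT_MANT_MASK	0x00000fffu	/* illustrative mantissa field */
#define CNT_EXP_MASK	0x0000f000u	/* illustrative exponent field */
#define CNT_EXP_SHIFT	12

/* Counter packed as mantissa * 2^exponent. */
static uint32_t unpack_counter(uint32_t reg)
{
	uint32_t m = reg & CNT_MANT_MASK;
	uint32_t e = (reg & CNT_EXP_MASK) >> CNT_EXP_SHIFT;

	return m << e;
}
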
e                1860 drivers/media/i2c/adv7604.c #define _SEL(a,b,c,d,e,f)	{ \
e                1862 drivers/media/i2c/adv7604.c 	ADV76XX_OP_CH_SEL_##d, ADV76XX_OP_CH_SEL_##e, ADV76XX_OP_CH_SEL_##f }
e                2042 drivers/media/i2c/adv7842.c #define _SEL(a, b, c, d, e, f)	{ \
e                2044 drivers/media/i2c/adv7842.c 	ADV7842_OP_CH_SEL_##d, ADV7842_OP_CH_SEL_##e, ADV7842_OP_CH_SEL_##f }
e                2496 drivers/media/i2c/adv7842.c static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
e                2501 drivers/media/i2c/adv7842.c 	memset(e->reserved, 0, sizeof(e->reserved));
e                2503 drivers/media/i2c/adv7842.c 	if (e->pad > ADV7842_EDID_PORT_VGA)
e                2505 drivers/media/i2c/adv7842.c 	if (e->start_block != 0)
e                2507 drivers/media/i2c/adv7842.c 	if (e->blocks > 2) {
e                2508 drivers/media/i2c/adv7842.c 		e->blocks = 2;
e                2513 drivers/media/i2c/adv7842.c 	state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
e                2514 drivers/media/i2c/adv7842.c 			e->edid[0x16]);
e                2516 drivers/media/i2c/adv7842.c 	switch (e->pad) {
e                2519 drivers/media/i2c/adv7842.c 		state->vga_edid.present = e->blocks ? 0x1 : 0x0;
e                2520 drivers/media/i2c/adv7842.c 		memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
e                2526 drivers/media/i2c/adv7842.c 		if (e->blocks) {
e                2527 drivers/media/i2c/adv7842.c 			state->hdmi_edid.present |= 0x04 << e->pad;
e                2529 drivers/media/i2c/adv7842.c 			state->hdmi_edid.present &= ~(0x04 << e->pad);
e                2532 drivers/media/i2c/adv7842.c 		memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
e                2533 drivers/media/i2c/adv7842.c 		err = edid_write_hdmi_segment(sd, e->pad);
e                2539 drivers/media/i2c/adv7842.c 		v4l2_err(sd, "error %d writing edid on port %d\n", err, e->pad);
e                 174 drivers/media/pci/ivtv/ivtv-fileops.c 		struct v4l2_enc_idx_entry *e = itv->pgm_info + idx;
e                 180 drivers/media/pci/ivtv/ivtv-fileops.c 		e->offset = read_enc(addr + 4) + ((u64)read_enc(addr + 8) << 32);
e                 181 drivers/media/pci/ivtv/ivtv-fileops.c 		if (e->offset > itv->mpg_data_received) {
e                 184 drivers/media/pci/ivtv/ivtv-fileops.c 		e->offset += itv->vbi_data_inserted;
e                 185 drivers/media/pci/ivtv/ivtv-fileops.c 		e->length = read_enc(addr);
e                 186 drivers/media/pci/ivtv/ivtv-fileops.c 		e->pts = read_enc(addr + 16) + ((u64)(read_enc(addr + 20) & 1) << 32);
e                 187 drivers/media/pci/ivtv/ivtv-fileops.c 		e->flags = mapping[read_enc(addr + 12) & 7];
e                1257 drivers/media/pci/ivtv/ivtv-ioctl.c 	struct v4l2_enc_idx_entry *e = idx->entry;
e                1270 drivers/media/pci/ivtv/ivtv-ioctl.c 		*e = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX];
e                1271 drivers/media/pci/ivtv/ivtv-ioctl.c 		if ((e->flags & V4L2_ENC_IDX_FRAME_MASK) <= V4L2_ENC_IDX_FRAME_B) {
e                1273 drivers/media/pci/ivtv/ivtv-ioctl.c 			e++;
e                  27 drivers/media/platform/vsp1/vsp1_drm.c #define BRX_NAME(e)	(e)->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"
e                 577 drivers/media/platform/vsp1/vsp1_video.c 		struct vsp1_entity *e;
e                 583 drivers/media/platform/vsp1/vsp1_video.c 		e = to_vsp1_entity(subdev);
e                 584 drivers/media/platform/vsp1/vsp1_video.c 		list_add_tail(&e->list_pipe, &pipe->entities);
e                 585 drivers/media/platform/vsp1/vsp1_video.c 		e->pipe = pipe;
e                 587 drivers/media/platform/vsp1/vsp1_video.c 		switch (e->type) {
e                 601 drivers/media/platform/vsp1/vsp1_video.c 			pipe->lif = e;
e                 606 drivers/media/platform/vsp1/vsp1_video.c 			pipe->brx = e;
e                 610 drivers/media/platform/vsp1/vsp1_video.c 			pipe->hgo = e;
e                 614 drivers/media/platform/vsp1/vsp1_video.c 			pipe->hgt = e;
e                  48 drivers/media/platform/xilinx/xilinx-dma.h static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
e                  50 drivers/media/platform/xilinx/xilinx-dma.h 	return container_of(e->pipe, struct xvip_pipeline, pipe);
e                 195 drivers/media/rc/ir-imon-decoder.c 	struct ir_raw_event *e = events;
e                 200 drivers/media/rc/ir-imon-decoder.c 	init_ir_raw_event_duration(e, 1, IMON_UNIT);
e                 208 drivers/media/rc/ir-imon-decoder.c 		if (pulse == e->pulse) {
e                 209 drivers/media/rc/ir-imon-decoder.c 			e->duration += IMON_UNIT;
e                 213 drivers/media/rc/ir-imon-decoder.c 			init_ir_raw_event_duration(++e, pulse, IMON_UNIT);
e                 218 drivers/media/rc/ir-imon-decoder.c 		if (pulse == e->pulse) {
e                 219 drivers/media/rc/ir-imon-decoder.c 			e->duration += IMON_UNIT;
e                 223 drivers/media/rc/ir-imon-decoder.c 			init_ir_raw_event_duration(++e, pulse, IMON_UNIT);
e                 227 drivers/media/rc/ir-imon-decoder.c 	if (e->pulse)
e                 228 drivers/media/rc/ir-imon-decoder.c 		e++;
e                 230 drivers/media/rc/ir-imon-decoder.c 	return e - events;
e                 191 drivers/media/rc/ir-jvc-decoder.c 	struct ir_raw_event *e = events;
e                 196 drivers/media/rc/ir-jvc-decoder.c 	ret = ir_raw_gen_pd(&e, max, &ir_jvc_timings, JVC_NBITS, raw);
e                 200 drivers/media/rc/ir-jvc-decoder.c 	return e - events;
e                 402 drivers/media/rc/ir-mce_kbd-decoder.c 	struct ir_raw_event *e = events;
e                 416 drivers/media/rc/ir-mce_kbd-decoder.c 	ret = ir_raw_gen_manchester(&e, max, &ir_mce_kbd_timings, len, raw);
e                 420 drivers/media/rc/ir-mce_kbd-decoder.c 	return e - events;
e                 235 drivers/media/rc/ir-nec-decoder.c 	struct ir_raw_event *e = events;
e                 243 drivers/media/rc/ir-nec-decoder.c 	ret = ir_raw_gen_pd(&e, max, &ir_nec_timings, NEC_NBITS, raw);
e                 247 drivers/media/rc/ir-nec-decoder.c 	return e - events;
e                 213 drivers/media/rc/ir-rc5-decoder.c 	struct ir_raw_event *e = events;
e                 226 drivers/media/rc/ir-rc5-decoder.c 		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings,
e                 242 drivers/media/rc/ir-rc5-decoder.c 		ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
e                 247 drivers/media/rc/ir-rc5-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 256 drivers/media/rc/ir-rc5-decoder.c 		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
e                 265 drivers/media/rc/ir-rc5-decoder.c 	return e - events;
e                 313 drivers/media/rc/ir-rc6-decoder.c 	struct ir_raw_event *e = events;
e                 317 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 324 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 330 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 355 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 362 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 368 drivers/media/rc/ir-rc6-decoder.c 		ret = ir_raw_gen_manchester(&e, max - (e - events),
e                 376 drivers/media/rc/ir-rc6-decoder.c 	return e - events;
e                 202 drivers/media/rc/ir-rcmm-decoder.c 	struct ir_raw_event *e = events;
e                 207 drivers/media/rc/ir-rcmm-decoder.c 		ret = ir_rcmm_rawencoder(&e, max, 32, scancode);
e                 210 drivers/media/rc/ir-rcmm-decoder.c 		ret = ir_rcmm_rawencoder(&e, max, 24, scancode);
e                 213 drivers/media/rc/ir-rcmm-decoder.c 		ret = ir_rcmm_rawencoder(&e, max, 12, scancode);
e                 222 drivers/media/rc/ir-rcmm-decoder.c 	return e - events;
e                 192 drivers/media/rc/ir-sanyo-decoder.c 	struct ir_raw_event *e = events;
e                 201 drivers/media/rc/ir-sanyo-decoder.c 	ret = ir_raw_gen_pd(&e, max, &ir_sanyo_timings, SANYO_NBITS, raw);
e                 205 drivers/media/rc/ir-sanyo-decoder.c 	return e - events;
e                 193 drivers/media/rc/ir-sharp-decoder.c 	struct ir_raw_event *e = events;
e                 199 drivers/media/rc/ir-sharp-decoder.c 	ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
e                 208 drivers/media/rc/ir-sharp-decoder.c 	ret = ir_raw_gen_pd(&e, max, &ir_sharp_timings, SHARP_NBITS,
e                 213 drivers/media/rc/ir-sharp-decoder.c 	return e - events;
e                 190 drivers/media/rc/ir-sony-decoder.c 	struct ir_raw_event *e = events;
e                 206 drivers/media/rc/ir-sony-decoder.c 	ret = ir_raw_gen_pl(&e, max, &ir_sony_timings, len, raw);
e                 210 drivers/media/rc/ir-sony-decoder.c 	return e - events;
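
Every raw-IR encoder above uses the same cursor convention: e starts at events, each generator helper advances it through &e, and the function returns e - events, i.e. the number of entries written (or propagates a negative error). A generic sketch of that moving-cursor fill pattern (the names and the int payload are hypothetical, not the rc-core API):

#include <stddef.h>

/* Hypothetical generator: appends two entries via the caller's cursor,
 * failing if the remaining capacity is too small. */
static int gen_pair(int **cursor, int space, int a, int b)
{
	if (space < 2)
		return -1;
	*(*cursor)++ = a;
	*(*cursor)++ = b;
	return 0;
}

static int encode(int *events, int max)
{
	int *e = events;

	if (gen_pair(&e, max - (int)(e - events), 1, 0) < 0)
		return -1;
	return (int)(e - events);	/* entries written, as the IR encoders do */
}
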
e                 470 drivers/media/rc/rc-main.c 	const struct rc_map_table *e = elt;
e                 472 drivers/media/rc/rc-main.c 	if (*scancode < e->scancode)
e                 474 drivers/media/rc/rc-main.c 	else if (*scancode > e->scancode)
e                  53 drivers/media/tuners/tda9887.c 	unsigned char     e;
e                 146 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 157 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 168 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 179 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 188 drivers/media/tuners/tda9887.c 		.e     = ( cAudioIF_5_5   |
e                 196 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36	  |
e                 206 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36	  |
e                 217 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 228 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 239 drivers/media/tuners/tda9887.c 		.e     = ( cGating_36     |
e                 252 drivers/media/tuners/tda9887.c 	.e    = ( cTunerGainLow  |
e                 264 drivers/media/tuners/tda9887.c 	.e    = ( cTunerGainLow  |
e                 429 drivers/media/tuners/tda9887.c 	buf[3] = norm->e;
e                1298 drivers/media/usb/cx231xx/cx231xx-cards.c 			struct eeprom *e = kzalloc(sizeof(*e), GFP_KERNEL);
e                1300 drivers/media/usb/cx231xx/cx231xx-cards.c 			if (e == NULL) {
e                1305 drivers/media/usb/cx231xx/cx231xx-cards.c 			e->client.adapter = cx231xx_get_i2c_adap(dev, I2C_1_MUX_1);
e                1306 drivers/media/usb/cx231xx/cx231xx-cards.c 			e->client.addr = 0xa0 >> 1;
e                1308 drivers/media/usb/cx231xx/cx231xx-cards.c 			read_eeprom(dev, &e->client, e->eeprom, sizeof(e->eeprom));
e                1309 drivers/media/usb/cx231xx/cx231xx-cards.c 			tveeprom_hauppauge_analog(&e->tvee, e->eeprom + 0xc0);
e                1310 drivers/media/usb/cx231xx/cx231xx-cards.c 			kfree(e);
e                 811 drivers/media/usb/dvb-usb/dib0700_core.c 	const struct usb_endpoint_descriptor *e;
e                 841 drivers/media/usb/dvb-usb/dib0700_core.c 	e = &intf->cur_altsetting->endpoint[rc_ep].desc;
e                 842 drivers/media/usb/dvb-usb/dib0700_core.c 	if (usb_endpoint_dir_in(e)) {
e                 843 drivers/media/usb/dvb-usb/dib0700_core.c 		if (usb_endpoint_xfer_bulk(e)) {
e                 850 drivers/media/usb/dvb-usb/dib0700_core.c 		} else if (usb_endpoint_xfer_int(e)) {
e                 745 drivers/media/usb/em28xx/em28xx-audio.c 				       struct usb_endpoint_descriptor *e)
e                 747 drivers/media/usb/em28xx/em28xx-audio.c 	int size = le16_to_cpu(e->wMaxPacketSize);
e                 758 drivers/media/usb/em28xx/em28xx-audio.c 	struct usb_endpoint_descriptor *e, *ep = NULL;
e                 778 drivers/media/usb/em28xx/em28xx-audio.c 		e = &intf->altsetting[alt].endpoint[i].desc;
e                 779 drivers/media/usb/em28xx/em28xx-audio.c 		if (!usb_endpoint_dir_in(e))
e                 781 drivers/media/usb/em28xx/em28xx-audio.c 		if (e->bEndpointAddress == EM28XX_EP_AUDIO) {
e                 782 drivers/media/usb/em28xx/em28xx-audio.c 			ep = e;
e                3605 drivers/media/usb/em28xx/em28xx-cards.c 	const struct usb_endpoint_descriptor *e;
e                3633 drivers/media/usb/em28xx/em28xx-cards.c 	e = &intf->altsetting[alt].endpoint[ep].desc;
e                3635 drivers/media/usb/em28xx/em28xx-cards.c 	if (!usb_endpoint_dir_in(e))
e                3638 drivers/media/usb/em28xx/em28xx-cards.c 	sizedescr = le16_to_cpu(e->wMaxPacketSize);
e                3646 drivers/media/usb/em28xx/em28xx-cards.c 	switch (e->bEndpointAddress) {
e                3649 drivers/media/usb/em28xx/em28xx-cards.c 		if (usb_endpoint_xfer_isoc(e)) {
e                3650 drivers/media/usb/em28xx/em28xx-cards.c 			dev->analog_ep_isoc = e->bEndpointAddress;
e                3652 drivers/media/usb/em28xx/em28xx-cards.c 		} else if (usb_endpoint_xfer_bulk(e)) {
e                3653 drivers/media/usb/em28xx/em28xx-cards.c 			dev->analog_ep_bulk = e->bEndpointAddress;
e                3657 drivers/media/usb/em28xx/em28xx-cards.c 		if (usb_endpoint_xfer_isoc(e))
e                3664 drivers/media/usb/em28xx/em28xx-cards.c 		if (*has_video && (usb_endpoint_xfer_bulk(e))) {
e                3665 drivers/media/usb/em28xx/em28xx-cards.c 			dev->analog_ep_bulk = e->bEndpointAddress;
e                3667 drivers/media/usb/em28xx/em28xx-cards.c 			if (usb_endpoint_xfer_isoc(e)) {
e                3677 drivers/media/usb/em28xx/em28xx-cards.c 					dev->dvb_ep_isoc = e->bEndpointAddress;
e                3683 drivers/media/usb/em28xx/em28xx-cards.c 				dev->dvb_ep_bulk = e->bEndpointAddress;
e                3688 drivers/media/usb/em28xx/em28xx-cards.c 		if (usb_endpoint_xfer_isoc(e)) {
e                3690 drivers/media/usb/em28xx/em28xx-cards.c 				dev->dvb_ep_isoc_ts2 = e->bEndpointAddress;
e                3695 drivers/media/usb/em28xx/em28xx-cards.c 			dev->dvb_ep_bulk_ts2 = e->bEndpointAddress;
e                 561 drivers/media/usb/gspca/cpia1.c 			       u8 e, u8 f, u8 g, u8 h,
e                 574 drivers/media/usb/gspca/cpia1.c 	gspca_dev->usb_buf[0] = e;
e                 177 drivers/media/usb/stk1160/stk1160-core.c 	int i, e, sizedescr, size, ifnum;
e                 188 drivers/media/usb/stk1160/stk1160-core.c 		for (e = 0; e < intf->altsetting[i].desc.bNumEndpoints; e++) {
e                 191 drivers/media/usb/stk1160/stk1160-core.c 			desc = &intf->altsetting[i].endpoint[e].desc;
e                1236 drivers/media/usb/tm6000/tm6000-cards.c 			struct usb_host_endpoint	*e;
e                1239 drivers/media/usb/tm6000/tm6000-cards.c 			e = &interface->altsetting[i].endpoint[ep];
e                1241 drivers/media/usb/tm6000/tm6000-cards.c 			dir_out = ((e->desc.bEndpointAddress &
e                1249 drivers/media/usb/tm6000/tm6000-cards.c 			switch (e->desc.bmAttributes) {
e                1254 drivers/media/usb/tm6000/tm6000-cards.c 							 "Bulk IN", e,
e                1259 drivers/media/usb/tm6000/tm6000-cards.c 							 "Bulk OUT", e,
e                1267 drivers/media/usb/tm6000/tm6000-cards.c 							 "ISOC IN", e,
e                1272 drivers/media/usb/tm6000/tm6000-cards.c 							 "ISOC OUT", e,
e                1280 drivers/media/usb/tm6000/tm6000-cards.c 							"INT IN", e,
e                1285 drivers/media/usb/tm6000/tm6000-cards.c 							"INT OUT", e,
e                 971 drivers/mfd/twl-core.c 	int e = 0;
e                 973 drivers/mfd/twl-core.c 	e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
e                 975 drivers/mfd/twl-core.c 	return e;
e                 980 drivers/mfd/twl-core.c 	int e = 0;
e                 982 drivers/mfd/twl-core.c 	e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
e                 984 drivers/mfd/twl-core.c 	e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
e                 987 drivers/mfd/twl-core.c 	return e;
e                 993 drivers/mfd/twl-core.c 	int e = 0;
e                1024 drivers/mfd/twl-core.c 	e |= unprotect_pm_master();
e                1026 drivers/mfd/twl-core.c 	e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
e                1027 drivers/mfd/twl-core.c 	e |= protect_pm_master();
e                1029 drivers/mfd/twl-core.c 	if (e < 0)
e                1030 drivers/mfd/twl-core.c 		pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e);
e                 284 drivers/misc/cxl/trace.h 	TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
e                 286 drivers/misc/cxl/trace.h 	TP_ARGS(ctx, idx, e, v),
e                 293 drivers/misc/cxl/trace.h 		__field(u64, e)
e                 302 drivers/misc/cxl/trace.h 		__entry->e = e;
e                 311 drivers/misc/cxl/trace.h 		__entry->e,
e                 361 drivers/misc/genwqe/card_base.c 	unsigned int type, e = 0;
e                 366 drivers/misc/genwqe/card_base.c 			e = genwqe_ffdc_buff_size(cd, 0);
e                 369 drivers/misc/genwqe/card_base.c 			e = genwqe_ffdc_buff_size(cd, 1);
e                 372 drivers/misc/genwqe/card_base.c 			e = genwqe_ffdc_buff_size(cd, 2);
e                 375 drivers/misc/genwqe/card_base.c 			e = GENWQE_FFDC_REGS;
e                 380 drivers/misc/genwqe/card_base.c 		cd->ffdc[type].entries = e;
e                 382 drivers/misc/genwqe/card_base.c 			kmalloc_array(e, sizeof(struct genwqe_reg),
e                 905 drivers/misc/genwqe/card_utils.c 	u64 eevptr, e, val, addr;
e                 913 drivers/misc/genwqe/card_utils.c 			e = __genwqe_readq(cd, l_addr);
e                 914 drivers/misc/genwqe/card_utils.c 			if ((e == 0x0) || (e == 0xffffffffffffffffull))
e                 917 drivers/misc/genwqe/card_utils.c 			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
e                 918 drivers/misc/genwqe/card_utils.c 			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
e                 919 drivers/misc/genwqe/card_utils.c 			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
e                   9 drivers/misc/ocxl/config.c #define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s)
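
EXTRACT_BITS() above is the standard mask-and-shift idiom built on GENMASK(e, s): for instance, EXTRACT_BITS(val, 4, 7) isolates bits 7:4. An equivalent open-coded form without the kernel helper (plain C; the width handling is spelled out for clarity):

#include <stdint.h>

/* Same effect as EXTRACT_BITS(val, s, e) for 0 <= s <= e < 32:
 * build a mask of (e - s + 1) ones, then shift the field down to bit 0. */
static uint32_t extract_bits(uint32_t val, unsigned int s, unsigned int e)
{
	unsigned int width = e - s + 1;
	uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1u);

	return (val >> s) & mask;
}
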
e                  45 drivers/misc/vmw_vmci/vmci_event.c 	int e;
e                  48 drivers/misc/vmw_vmci/vmci_event.c 	for (e = 0; e < VMCI_EVENT_MAX; e++) {
e                  50 drivers/misc/vmw_vmci/vmci_event.c 		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
e                  69 drivers/misc/vmw_vmci/vmci_event.c 	int e;
e                  71 drivers/misc/vmw_vmci/vmci_event.c 	for (e = 0; e < VMCI_EVENT_MAX; e++) {
e                  73 drivers/misc/vmw_vmci/vmci_event.c 		list_for_each_entry(cur, &subscriber_array[e], node) {
e                 136 drivers/mmc/core/mmc.c 	unsigned int e, m, a, b;
e                 153 drivers/mmc/core/mmc.c 	e = UNSTUFF_BITS(resp, 112, 3);
e                 154 drivers/mmc/core/mmc.c 	csd->taac_ns	 = (taac_exp[e] * taac_mant[m] + 9) / 10;
e                 158 drivers/mmc/core/mmc.c 	e = UNSTUFF_BITS(resp, 96, 3);
e                 159 drivers/mmc/core/mmc.c 	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
e                 162 drivers/mmc/core/mmc.c 	e = UNSTUFF_BITS(resp, 47, 3);
e                 164 drivers/mmc/core/mmc.c 	csd->capacity	  = (1 + m) << (e + 2);
e                 102 drivers/mmc/core/sd.c 	unsigned int e, m, csd_struct;
e                 110 drivers/mmc/core/sd.c 		e = UNSTUFF_BITS(resp, 112, 3);
e                 111 drivers/mmc/core/sd.c 		csd->taac_ns	 = (taac_exp[e] * taac_mant[m] + 9) / 10;
e                 115 drivers/mmc/core/sd.c 		e = UNSTUFF_BITS(resp, 96, 3);
e                 116 drivers/mmc/core/sd.c 		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
e                 119 drivers/mmc/core/sd.c 		e = UNSTUFF_BITS(resp, 47, 3);
e                 121 drivers/mmc/core/sd.c 		csd->capacity	  = (1 + m) << (e + 2);
e                 152 drivers/mmc/core/sd.c 		e = UNSTUFF_BITS(resp, 96, 3);
e                 153 drivers/mmc/core/sd.c 		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
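
The CSD lines above from mmc.c and sd.c all decode the same way: a 3-bit exponent field indexes a scale table, a 4-bit mantissa field indexes a value table, and their product (or a shift, for capacity) yields the parameter. A sketch of the TRAN_SPEED case; the table contents follow the SD/MMC CSD definition as used in drivers/mmc/core, but the helper itself is illustrative:

/* TRAN_SPEED: rate-unit table (scaled to bit/s) and time-value table. */
static const unsigned int tran_exp[] = {
	10000, 100000, 1000000, 10000000, 0, 0, 0, 0
};
static const unsigned char tran_mant[] = {
	0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80
};

static unsigned int decode_max_dtr(unsigned int e, unsigned int m)
{
	return tran_exp[e & 7] * tran_mant[m & 15];
}

For example, e = 2 and m = 6 give 1000000 * 25 = 25,000,000, i.e. the common 25 Mbit/s default-speed rating.
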
e                 149 drivers/mmc/host/cb710-mmc.c 	u32 e, x;
e                 150 drivers/mmc/host/cb710-mmc.c 	e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
e                 170 drivers/mmc/host/cb710-mmc.c 			limit, what, e, x);
e                 182 drivers/mmc/host/cb710-mmc.c 	u32 e, x;
e                 183 drivers/mmc/host/cb710-mmc.c 	e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
e                 203 drivers/mmc/host/cb710-mmc.c 			limit, mask, e, x);
e                  29 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e, *victim = NULL;
e                  32 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb) {
e                  33 drivers/mtd/ubi/fastmap-wl.c 		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
e                  34 drivers/mtd/ubi/fastmap-wl.c 			victim = e;
e                  35 drivers/mtd/ubi/fastmap-wl.c 			max_ec = e->ec;
e                  51 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e;
e                  54 drivers/mtd/ubi/fastmap-wl.c 		e = ubi->lookuptbl[pool->pebs[i]];
e                  55 drivers/mtd/ubi/fastmap-wl.c 		wl_tree_add(e, &ubi->free);
e                  63 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e;
e                  65 drivers/mtd/ubi/fastmap-wl.c 	ubi_rb_for_each_entry(p, e, root, u.rb)
e                  66 drivers/mtd/ubi/fastmap-wl.c 		if (e->pnum < UBI_FM_MAX_START)
e                  83 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e = NULL;
e                  89 drivers/mtd/ubi/fastmap-wl.c 		e = find_anchor_wl_entry(&ubi->free);
e                  91 drivers/mtd/ubi/fastmap-wl.c 		e = find_mean_wl_entry(ubi, &ubi->free);
e                  93 drivers/mtd/ubi/fastmap-wl.c 	if (!e)
e                  96 drivers/mtd/ubi/fastmap-wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
e                 100 drivers/mtd/ubi/fastmap-wl.c 	rb_erase(&e->u.rb, &ubi->free);
e                 103 drivers/mtd/ubi/fastmap-wl.c 	return e;
e                 114 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e;
e                 131 drivers/mtd/ubi/fastmap-wl.c 			e = wl_get_wle(ubi);
e                 132 drivers/mtd/ubi/fastmap-wl.c 			if (!e)
e                 135 drivers/mtd/ubi/fastmap-wl.c 			pool->pebs[pool->size] = e->pnum;
e                 145 drivers/mtd/ubi/fastmap-wl.c 			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
e                 146 drivers/mtd/ubi/fastmap-wl.c 			self_check_in_wl_tree(ubi, e, &ubi->free);
e                 147 drivers/mtd/ubi/fastmap-wl.c 			rb_erase(&e->u.rb, &ubi->free);
e                 150 drivers/mtd/ubi/fastmap-wl.c 			wl_pool->pebs[wl_pool->size] = e->pnum;
e                 316 drivers/mtd/ubi/fastmap-wl.c 	struct ubi_wl_entry *e;
e                 325 drivers/mtd/ubi/fastmap-wl.c 	e = ubi->lookuptbl[pnum];
e                 331 drivers/mtd/ubi/fastmap-wl.c 	if (!e) {
e                 332 drivers/mtd/ubi/fastmap-wl.c 		e = fm_e;
e                 333 drivers/mtd/ubi/fastmap-wl.c 		ubi_assert(e->ec >= 0);
e                 334 drivers/mtd/ubi/fastmap-wl.c 		ubi->lookuptbl[pnum] = e;
e                 340 drivers/mtd/ubi/fastmap-wl.c 	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
e                 361 drivers/mtd/ubi/fastmap-wl.c 			kfree(ubi->fm->e[i]);
e                 375 drivers/mtd/ubi/fastmap-wl.c 					   struct ubi_wl_entry *e,
e                 377 drivers/mtd/ubi/fastmap-wl.c 	if (e && !ubi->fm_disabled && !ubi->fm &&
e                 378 drivers/mtd/ubi/fastmap-wl.c 	    e->pnum < UBI_FM_MAX_START)
e                 379 drivers/mtd/ubi/fastmap-wl.c 		e = rb_entry(rb_next(root->rb_node),
e                 382 drivers/mtd/ubi/fastmap-wl.c 	return e;
e                1051 drivers/mtd/ubi/fastmap.c 		struct ubi_wl_entry *e;
e                1053 drivers/mtd/ubi/fastmap.c 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
e                1054 drivers/mtd/ubi/fastmap.c 		if (!e) {
e                1056 drivers/mtd/ubi/fastmap.c 				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
e                1062 drivers/mtd/ubi/fastmap.c 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
e                1063 drivers/mtd/ubi/fastmap.c 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
e                1064 drivers/mtd/ubi/fastmap.c 		fm->e[i] = e;
e                1266 drivers/mtd/ubi/fastmap.c 			wl_e = ubi_wrk->e;
e                1327 drivers/mtd/ubi/fastmap.c 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
e                1328 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
e                1335 drivers/mtd/ubi/fastmap.c 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
e                1336 drivers/mtd/ubi/fastmap.c 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
e                1337 drivers/mtd/ubi/fastmap.c 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
e                1348 drivers/mtd/ubi/fastmap.c 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
e                1349 drivers/mtd/ubi/fastmap.c 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
e                1352 drivers/mtd/ubi/fastmap.c 				new_fm->e[i]->pnum);
e                1359 drivers/mtd/ubi/fastmap.c 					new_fm->e[i]->pnum, 0, ubi->leb_size);
e                1362 drivers/mtd/ubi/fastmap.c 				new_fm->e[i]->pnum);
e                1447 drivers/mtd/ubi/fastmap.c 	struct ubi_wl_entry *e;
e                1468 drivers/mtd/ubi/fastmap.c 	e = ubi_wl_get_fm_peb(ubi, 1);
e                1469 drivers/mtd/ubi/fastmap.c 	if (!e)
e                1477 drivers/mtd/ubi/fastmap.c 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
e                1479 drivers/mtd/ubi/fastmap.c 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
e                1484 drivers/mtd/ubi/fastmap.c 	fm->e[0] = e;
e                1512 drivers/mtd/ubi/fastmap.c 		if (fm->e[i]) {
e                1513 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
e                1515 drivers/mtd/ubi/fastmap.c 			fm->e[i] = NULL;
e                1578 drivers/mtd/ubi/fastmap.c 			if (old_fm && old_fm->e[i]) {
e                1579 drivers/mtd/ubi/fastmap.c 				ret = erase_block(ubi, old_fm->e[i]->pnum);
e                1584 drivers/mtd/ubi/fastmap.c 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
e                1586 drivers/mtd/ubi/fastmap.c 						new_fm->e[j] = NULL;
e                1590 drivers/mtd/ubi/fastmap.c 				new_fm->e[i] = old_fm->e[i];
e                1591 drivers/mtd/ubi/fastmap.c 				old_fm->e[i] = NULL;
e                1596 drivers/mtd/ubi/fastmap.c 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
e                1597 drivers/mtd/ubi/fastmap.c 					new_fm->e[j] = NULL;
e                1604 drivers/mtd/ubi/fastmap.c 			new_fm->e[i] = tmp_e;
e                1606 drivers/mtd/ubi/fastmap.c 			if (old_fm && old_fm->e[i]) {
e                1607 drivers/mtd/ubi/fastmap.c 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
e                1609 drivers/mtd/ubi/fastmap.c 				old_fm->e[i] = NULL;
e                1617 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
e                1619 drivers/mtd/ubi/fastmap.c 			old_fm->e[i] = NULL;
e                1630 drivers/mtd/ubi/fastmap.c 			ret = erase_block(ubi, old_fm->e[0]->pnum);
e                1635 drivers/mtd/ubi/fastmap.c 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
e                1637 drivers/mtd/ubi/fastmap.c 					new_fm->e[i] = NULL;
e                1641 drivers/mtd/ubi/fastmap.c 			new_fm->e[0] = old_fm->e[0];
e                1642 drivers/mtd/ubi/fastmap.c 			new_fm->e[0]->ec = ret;
e                1643 drivers/mtd/ubi/fastmap.c 			old_fm->e[0] = NULL;
e                1646 drivers/mtd/ubi/fastmap.c 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
e                1648 drivers/mtd/ubi/fastmap.c 			new_fm->e[0] = tmp_e;
e                1649 drivers/mtd/ubi/fastmap.c 			old_fm->e[0] = NULL;
e                1656 drivers/mtd/ubi/fastmap.c 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
e                1657 drivers/mtd/ubi/fastmap.c 				new_fm->e[i] = NULL;
e                1663 drivers/mtd/ubi/fastmap.c 		new_fm->e[0] = tmp_e;
e                 241 drivers/mtd/ubi/ubi.h 	struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
e                 804 drivers/mtd/ubi/ubi.h 	struct ubi_wl_entry *e;
e                1000 drivers/mtd/ubi/ubi.h #define ubi_for_each_free_peb(ubi, e, tmp_rb)	\
e                1001 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
e                1009 drivers/mtd/ubi/ubi.h #define ubi_for_each_used_peb(ubi, e, tmp_rb)	\
e                1010 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
e                1018 drivers/mtd/ubi/ubi.h #define ubi_for_each_scrub_peb(ubi, e, tmp_rb)	\
e                1019 drivers/mtd/ubi/ubi.h 	ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
e                1027 drivers/mtd/ubi/ubi.h #define ubi_for_each_protected_peb(ubi, i, e)	\
e                1029 drivers/mtd/ubi/ubi.h 		list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
e                1218 drivers/mtd/ubi/ubi.h 			if (ubi->fm->e[i]->pnum == pnum)
e                1219 drivers/mtd/ubi/ubi.h 				return ubi->fm->e[i];
e                 127 drivers/mtd/ubi/wl.c 				 struct ubi_wl_entry *e, struct rb_root *root);
e                 129 drivers/mtd/ubi/wl.c 			    struct ubi_wl_entry *e);
e                 139 drivers/mtd/ubi/wl.c static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
e                 150 drivers/mtd/ubi/wl.c 		if (e->ec < e1->ec)
e                 152 drivers/mtd/ubi/wl.c 		else if (e->ec > e1->ec)
e                 155 drivers/mtd/ubi/wl.c 			ubi_assert(e->pnum != e1->pnum);
e                 156 drivers/mtd/ubi/wl.c 			if (e->pnum < e1->pnum)
e                 163 drivers/mtd/ubi/wl.c 	rb_link_node(&e->u.rb, parent, p);
e                 164 drivers/mtd/ubi/wl.c 	rb_insert_color(&e->u.rb, root);
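
wl_tree_add() above is the stock kernel rb-tree insertion walk, ordered primarily by erase counter and tie-broken by PEB number so that equal-EC entries remain distinct. Condensed, the same logic looks like this (the comparison is folded into one expression; the rbtree calls are the real <linux/rbtree.h> API):

#include <linux/rbtree.h>

static void wl_tree_add_sketch(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct ubi_wl_entry *e1 = rb_entry(*p, struct ubi_wl_entry, u.rb);

		parent = *p;
		if (e->ec < e1->ec || (e->ec == e1->ec && e->pnum < e1->pnum))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&e->u.rb, parent, p);	/* link at the empty slot */
	rb_insert_color(&e->u.rb, root);	/* rebalance */
}
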
e                 175 drivers/mtd/ubi/wl.c static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
e                 177 drivers/mtd/ubi/wl.c 	ubi->lookuptbl[e->pnum] = NULL;
e                 178 drivers/mtd/ubi/wl.c 	kmem_cache_free(ubi_wl_entry_slab, e);
e                 236 drivers/mtd/ubi/wl.c static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
e                 246 drivers/mtd/ubi/wl.c 		if (e->pnum == e1->pnum) {
e                 247 drivers/mtd/ubi/wl.c 			ubi_assert(e == e1);
e                 251 drivers/mtd/ubi/wl.c 		if (e->ec < e1->ec)
e                 253 drivers/mtd/ubi/wl.c 		else if (e->ec > e1->ec)
e                 256 drivers/mtd/ubi/wl.c 			ubi_assert(e->pnum != e1->pnum);
e                 257 drivers/mtd/ubi/wl.c 			if (e->pnum < e1->pnum)
e                 275 drivers/mtd/ubi/wl.c static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
e                 282 drivers/mtd/ubi/wl.c 			if (p == e)
e                 298 drivers/mtd/ubi/wl.c static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
e                 305 drivers/mtd/ubi/wl.c 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
e                 306 drivers/mtd/ubi/wl.c 	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
e                 322 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e, *prev_e = NULL;
e                 325 drivers/mtd/ubi/wl.c 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
e                 326 drivers/mtd/ubi/wl.c 	max = e->ec + diff;
e                 337 drivers/mtd/ubi/wl.c 			prev_e = e;
e                 338 drivers/mtd/ubi/wl.c 			e = e1;
e                 346 drivers/mtd/ubi/wl.c 	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
e                 349 drivers/mtd/ubi/wl.c 	return e;
e                 364 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e, *first, *last;
e                 370 drivers/mtd/ubi/wl.c 		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
e                 375 drivers/mtd/ubi/wl.c 		e = may_reserve_for_fm(ubi, e, root);
e                 377 drivers/mtd/ubi/wl.c 		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
e                 379 drivers/mtd/ubi/wl.c 	return e;
e                 392 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                 394 drivers/mtd/ubi/wl.c 	e = find_mean_wl_entry(ubi, &ubi->free);
e                 395 drivers/mtd/ubi/wl.c 	if (!e) {
e                 400 drivers/mtd/ubi/wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
e                 406 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
e                 408 drivers/mtd/ubi/wl.c 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
e                 410 drivers/mtd/ubi/wl.c 	return e;
e                 423 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                 425 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
e                 426 drivers/mtd/ubi/wl.c 	if (!e)
e                 429 drivers/mtd/ubi/wl.c 	if (self_check_in_pq(ubi, e))
e                 432 drivers/mtd/ubi/wl.c 	list_del(&e->u.list);
e                 433 drivers/mtd/ubi/wl.c 	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
e                 446 drivers/mtd/ubi/wl.c static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
e                 451 drivers/mtd/ubi/wl.c 	unsigned long long ec = e->ec;
e                 453 drivers/mtd/ubi/wl.c 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
e                 455 drivers/mtd/ubi/wl.c 	err = self_check_ec(ubi, e->pnum, e->ec);
e                 463 drivers/mtd/ubi/wl.c 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
e                 474 drivers/mtd/ubi/wl.c 			e->pnum, ec);
e                 479 drivers/mtd/ubi/wl.c 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
e                 483 drivers/mtd/ubi/wl.c 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
e                 487 drivers/mtd/ubi/wl.c 	e->ec = ec;
e                 489 drivers/mtd/ubi/wl.c 	if (e->ec > ubi->max_ec)
e                 490 drivers/mtd/ubi/wl.c 		ubi->max_ec = e->ec;
e                 508 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e, *tmp;
e                 518 drivers/mtd/ubi/wl.c 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
e                 520 drivers/mtd/ubi/wl.c 			e->pnum, e->ec);
e                 522 drivers/mtd/ubi/wl.c 		list_del(&e->u.list);
e                 523 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->used);
e                 590 drivers/mtd/ubi/wl.c static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
e                 595 drivers/mtd/ubi/wl.c 	ubi_assert(e);
e                 598 drivers/mtd/ubi/wl.c 	       e->pnum, e->ec, torture);
e                 605 drivers/mtd/ubi/wl.c 	wl_wrk->e = e;
e                 627 drivers/mtd/ubi/wl.c static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
e                 632 drivers/mtd/ubi/wl.c 	dbg_wl("sync erase of PEB %i", e->pnum);
e                 634 drivers/mtd/ubi/wl.c 	wl_wrk.e = e;
e                1084 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e = wl_wrk->e;
e                1085 drivers/mtd/ubi/wl.c 	int pnum = e->pnum;
e                1091 drivers/mtd/ubi/wl.c 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
e                1093 drivers/mtd/ubi/wl.c 	err = sync_erase(ubi, e, wl_wrk->torture);
e                1096 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
e                1118 drivers/mtd/ubi/wl.c 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
e                1120 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
e                1127 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e);
e                1202 drivers/mtd/ubi/wl.c 		struct ubi_wl_entry *e = wl_wrk->e;
e                1204 drivers/mtd/ubi/wl.c 		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
e                1206 drivers/mtd/ubi/wl.c 		wl_entry_destroy(ubi, e);
e                1232 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1242 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
e                1243 drivers/mtd/ubi/wl.c 	if (e == ubi->move_from) {
e                1256 drivers/mtd/ubi/wl.c 	} else if (e == ubi->move_to) {
e                1273 drivers/mtd/ubi/wl.c 		if (in_wl_tree(e, &ubi->used)) {
e                1274 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->used);
e                1275 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
e                1276 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->scrub)) {
e                1277 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->scrub);
e                1278 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->scrub);
e                1279 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->erroneous)) {
e                1280 drivers/mtd/ubi/wl.c 			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
e                1281 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->erroneous);
e                1287 drivers/mtd/ubi/wl.c 			err = prot_queue_del(ubi, e->pnum);
e                1299 drivers/mtd/ubi/wl.c 	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
e                1302 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->used);
e                1322 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1328 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
e                1329 drivers/mtd/ubi/wl.c 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
e                1330 drivers/mtd/ubi/wl.c 				   in_wl_tree(e, &ubi->erroneous)) {
e                1335 drivers/mtd/ubi/wl.c 	if (e == ubi->move_to) {
e                1348 drivers/mtd/ubi/wl.c 	if (in_wl_tree(e, &ubi->used)) {
e                1349 drivers/mtd/ubi/wl.c 		self_check_in_wl_tree(ubi, e, &ubi->used);
e                1350 drivers/mtd/ubi/wl.c 		rb_erase(&e->u.rb, &ubi->used);
e                1354 drivers/mtd/ubi/wl.c 		err = prot_queue_del(ubi, e->pnum);
e                1363 drivers/mtd/ubi/wl.c 	wl_tree_add(e, &ubi->scrub);
e                1436 drivers/mtd/ubi/wl.c static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
e                1438 drivers/mtd/ubi/wl.c 	if (in_wl_tree(e, &ubi->scrub))
e                1440 drivers/mtd/ubi/wl.c 	else if (in_wl_tree(e, &ubi->erroneous))
e                1442 drivers/mtd/ubi/wl.c 	else if (ubi->move_from == e)
e                1444 drivers/mtd/ubi/wl.c 	else if (ubi->move_to == e)
e                1472 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1490 drivers/mtd/ubi/wl.c 	e = ubi->lookuptbl[pnum];
e                1491 drivers/mtd/ubi/wl.c 	if (!e) {
e                1500 drivers/mtd/ubi/wl.c 	if (!scrub_possible(ubi, e)) {
e                1523 drivers/mtd/ubi/wl.c 		e = ubi->lookuptbl[pnum];
e                1524 drivers/mtd/ubi/wl.c 		if (!e) {
e                1533 drivers/mtd/ubi/wl.c 		if (!scrub_possible(ubi, e)) {
e                1539 drivers/mtd/ubi/wl.c 		if (in_pq(ubi, e)) {
e                1540 drivers/mtd/ubi/wl.c 			prot_queue_del(ubi, e->pnum);
e                1541 drivers/mtd/ubi/wl.c 			wl_tree_add(e, &ubi->scrub);
e                1545 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->used)) {
e                1546 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->used);
e                1547 drivers/mtd/ubi/wl.c 			wl_tree_add(e, &ubi->scrub);
e                1551 drivers/mtd/ubi/wl.c 		} else if (in_wl_tree(e, &ubi->free)) {
e                1552 drivers/mtd/ubi/wl.c 			rb_erase(&e->u.rb, &ubi->free);
e                1560 drivers/mtd/ubi/wl.c 			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
e                1588 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1597 drivers/mtd/ubi/wl.c 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
e                1601 drivers/mtd/ubi/wl.c 				if (rb->rb_left == &e->u.rb)
e                1607 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
e                1695 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1698 drivers/mtd/ubi/wl.c 	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
e                1699 drivers/mtd/ubi/wl.c 	if (!e)
e                1702 drivers/mtd/ubi/wl.c 	e->pnum = aeb->pnum;
e                1703 drivers/mtd/ubi/wl.c 	e->ec = aeb->ec;
e                1704 drivers/mtd/ubi/wl.c 	ubi->lookuptbl[e->pnum] = e;
e                1707 drivers/mtd/ubi/wl.c 		err = sync_erase(ubi, e, false);
e                1711 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
e                1714 drivers/mtd/ubi/wl.c 		err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
e                1722 drivers/mtd/ubi/wl.c 	wl_entry_destroy(ubi, e);
e                1741 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                1775 drivers/mtd/ubi/wl.c 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
e                1776 drivers/mtd/ubi/wl.c 		if (!e) {
e                1781 drivers/mtd/ubi/wl.c 		e->pnum = aeb->pnum;
e                1782 drivers/mtd/ubi/wl.c 		e->ec = aeb->ec;
e                1783 drivers/mtd/ubi/wl.c 		ubi_assert(e->ec >= 0);
e                1785 drivers/mtd/ubi/wl.c 		wl_tree_add(e, &ubi->free);
e                1788 drivers/mtd/ubi/wl.c 		ubi->lookuptbl[e->pnum] = e;
e                1797 drivers/mtd/ubi/wl.c 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
e                1798 drivers/mtd/ubi/wl.c 			if (!e) {
e                1803 drivers/mtd/ubi/wl.c 			e->pnum = aeb->pnum;
e                1804 drivers/mtd/ubi/wl.c 			e->ec = aeb->ec;
e                1805 drivers/mtd/ubi/wl.c 			ubi->lookuptbl[e->pnum] = e;
e                1809 drivers/mtd/ubi/wl.c 				       e->pnum, e->ec);
e                1810 drivers/mtd/ubi/wl.c 				wl_tree_add(e, &ubi->used);
e                1813 drivers/mtd/ubi/wl.c 				       e->pnum, e->ec);
e                1814 drivers/mtd/ubi/wl.c 				wl_tree_add(e, &ubi->scrub);
e                1824 drivers/mtd/ubi/wl.c 		e = ubi_find_fm_block(ubi, aeb->pnum);
e                1826 drivers/mtd/ubi/wl.c 		if (e) {
e                1827 drivers/mtd/ubi/wl.c 			ubi_assert(!ubi->lookuptbl[e->pnum]);
e                1828 drivers/mtd/ubi/wl.c 			ubi->lookuptbl[e->pnum] = e;
e                1903 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e, *tmp;
e                1906 drivers/mtd/ubi/wl.c 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
e                1907 drivers/mtd/ubi/wl.c 			list_del(&e->u.list);
e                1908 drivers/mtd/ubi/wl.c 			wl_entry_destroy(ubi, e);
e                1984 drivers/mtd/ubi/wl.c 				 struct ubi_wl_entry *e, struct rb_root *root)
e                1989 drivers/mtd/ubi/wl.c 	if (in_wl_tree(e, root))
e                1993 drivers/mtd/ubi/wl.c 		e->pnum, e->ec, root);
e                2007 drivers/mtd/ubi/wl.c 			    struct ubi_wl_entry *e)
e                2012 drivers/mtd/ubi/wl.c 	if (in_pq(ubi, e))
e                2016 drivers/mtd/ubi/wl.c 		e->pnum, e->ec);
e                2023 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                2025 drivers/mtd/ubi/wl.c 	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
e                2026 drivers/mtd/ubi/wl.c 	self_check_in_wl_tree(ubi, e, &ubi->free);
e                2029 drivers/mtd/ubi/wl.c 	rb_erase(&e->u.rb, &ubi->free);
e                2031 drivers/mtd/ubi/wl.c 	return e;
e                2072 drivers/mtd/ubi/wl.c 	struct ubi_wl_entry *e;
e                2095 drivers/mtd/ubi/wl.c 	e = wl_get_wle(ubi);
e                2096 drivers/mtd/ubi/wl.c 	prot_queue_add(ubi, e);
e                2099 drivers/mtd/ubi/wl.c 	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
e                2102 drivers/mtd/ubi/wl.c 		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
e                2106 drivers/mtd/ubi/wl.c 	return e->pnum;
e                  17 drivers/mtd/ubi/wl.h 					       struct ubi_wl_entry *e,
e                  24 drivers/mtd/ubi/wl.h 					       struct ubi_wl_entry *e,
e                  26 drivers/mtd/ubi/wl.h 	return e;
e                1973 drivers/net/dsa/b53/b53_common.c int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
e                1983 drivers/net/dsa/b53/b53_common.c 	e->eee_enabled = p->eee_enabled;
e                1984 drivers/net/dsa/b53/b53_common.c 	e->eee_active = !!(reg & BIT(port));
e                1990 drivers/net/dsa/b53/b53_common.c int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
e                1998 drivers/net/dsa/b53/b53_common.c 	p->eee_enabled = e->eee_enabled;
e                1999 drivers/net/dsa/b53/b53_common.c 	b53_eee_enable_set(ds, port, e->eee_enabled);
e                 364 drivers/net/dsa/b53/b53_priv.h int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
e                 365 drivers/net/dsa/b53/b53_priv.h int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
e                 479 drivers/net/dsa/mt7530.h static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
e                 482 drivers/net/dsa/mt7530.h 	e->port = port;
e                 483 drivers/net/dsa/mt7530.h 	e->untagged = untagged;
e                1047 drivers/net/dsa/mv88e6xxx/chip.c 				 struct ethtool_eee *e)
e                1054 drivers/net/dsa/mv88e6xxx/chip.c 				 struct ethtool_eee *e)
e                 848 drivers/net/dsa/qca8k.c qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
e                1061 drivers/net/ethernet/broadcom/genet/bcmgenet.c static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e                1072 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	e->eee_enabled = p->eee_enabled;
e                1073 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	e->eee_active = p->eee_active;
e                1074 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
e                1076 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	return phy_ethtool_get_eee(dev->phydev, e);
e                1079 drivers/net/ethernet/broadcom/genet/bcmgenet.c static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
e                1091 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	p->eee_enabled = e->eee_enabled;
e                1102 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
e                1106 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	return phy_ethtool_set_eee(dev->phydev, e);
e                 523 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		const struct espi_intr_counts *e;
e                 525 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		e = t1_espi_get_intr_counts(adapter->espi);
e                 526 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->DIP2_parity_err;
e                 527 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->DIP4_err;
e                 528 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->rx_drops;
e                 529 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->tx_drops;
e                 530 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->rx_ovflw;
e                 531 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		*data++ = e->parity_err;
e                 713 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                 718 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->rx_max_pending = MAX_RX_BUFFERS;
e                 719 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e                 720 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->tx_max_pending = MAX_CMDQ_ENTRIES;
e                 722 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
e                 723 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
e                 724 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->tx_pending = adapter->params.sge.cmdQ_size[0];
e                 727 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                 732 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
e                 733 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
e                 734 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	    e->tx_pending > MAX_CMDQ_ENTRIES ||
e                 735 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	    e->rx_pending < MIN_FL_ENTRIES ||
e                 736 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
e                 737 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
e                 743 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
e                 744 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
e                 745 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
e                 746 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
e                 747 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		MAX_CMDQ1_ENTRIES : e->tx_pending;
e                 782 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
e                 789 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	e->magic = EEPROM_MAGIC(adapter);
e                 790 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
e                 792 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	memcpy(data, buf + e->offset, e->len);
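
The get_sge_param/set_sge_param pair above follows the usual ethtool ringparam shape: get reports the hardware maxima alongside the currently configured depths, and set rejects any out-of-range request up front so adapter state is only touched once the whole input has validated. A minimal self-contained sketch of that validate-then-commit shape (all sketch_* names and bounds are illustrative stand-ins, not the driver's MAX_RX_BUFFERS/MIN_FL_ENTRIES):

	#include <errno.h>

	/* Illustrative bounds standing in for the driver's limits. */
	#define SKETCH_MAX_RX	4096
	#define SKETCH_MIN_RX	  64
	#define SKETCH_MAX_TX	4096
	#define SKETCH_MIN_TX	  32

	struct sketch_ring_cfg { unsigned int rx_pending, tx_pending; };

	/* Check every field first; commit only if all checks pass. */
	static int sketch_set_ringparam(struct sketch_ring_cfg *cfg,
					unsigned int rx, unsigned int tx)
	{
		if (rx > SKETCH_MAX_RX || rx < SKETCH_MIN_RX ||
		    tx > SKETCH_MAX_TX || tx < SKETCH_MIN_TX)
			return -EINVAL;
		cfg->rx_pending = rx;
		cfg->tx_pending = tx;
		return 0;
	}

The one-big-OR-expression style mirrors the excerpts: a single compound test keeps the error path to one early return, so no partial configuration can ever be committed.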
e                 828 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct freelQ_e *e = &q->entries[q->pidx];
e                 847 drivers/net/ethernet/chelsio/cxgb/sge.c 		e->addr_lo = (u32)mapping;
e                 848 drivers/net/ethernet/chelsio/cxgb/sge.c 		e->addr_hi = (u64)mapping >> 32;
e                 849 drivers/net/ethernet/chelsio/cxgb/sge.c 		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
e                 851 drivers/net/ethernet/chelsio/cxgb/sge.c 		e->gen2 = V_CMD_GEN2(q->genbit);
e                 853 drivers/net/ethernet/chelsio/cxgb/sge.c 		e++;
e                 859 drivers/net/ethernet/chelsio/cxgb/sge.c 			e = q->entries;
e                1140 drivers/net/ethernet/chelsio/cxgb/sge.c static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
e                1146 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->addr_lo = (u32)mapping;
e                1147 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->addr_hi = (u64)mapping >> 32;
e                1148 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
e                1149 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
e                1159 drivers/net/ethernet/chelsio/cxgb/sge.c 						     struct cmdQ_e **e,
e                1168 drivers/net/ethernet/chelsio/cxgb/sge.c 		struct cmdQ_e *e1 = *e;
e                1189 drivers/net/ethernet/chelsio/cxgb/sge.c 		*e = e1;
e                1204 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct cmdQ_e *e, *e1;
e                1209 drivers/net/ethernet/chelsio/cxgb/sge.c 	e = e1 = &q->entries[pidx];
e                1223 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->addr_lo = (u32)desc_mapping;
e                1224 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->addr_hi = (u64)desc_mapping >> 32;
e                1225 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
e                1282 drivers/net/ethernet/chelsio/cxgb/sge.c 	e->flags = flags;
e                1470 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct respQ_e *e = &q->entries[q->cidx];
e                1475 drivers/net/ethernet/chelsio/cxgb/sge.c 	while (done < budget && e->GenerationBit == q->genbit) {
e                1476 drivers/net/ethernet/chelsio/cxgb/sge.c 		flags |= e->Qsleeping;
e                1478 drivers/net/ethernet/chelsio/cxgb/sge.c 		cmdq_processed[0] += e->Cmdq0CreditReturn;
e                1479 drivers/net/ethernet/chelsio/cxgb/sge.c 		cmdq_processed[1] += e->Cmdq1CreditReturn;
e                1495 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (likely(e->DataValid)) {
e                1496 drivers/net/ethernet/chelsio/cxgb/sge.c 			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
e                1498 drivers/net/ethernet/chelsio/cxgb/sge.c 			BUG_ON(!e->Sop || !e->Eop);
e                1499 drivers/net/ethernet/chelsio/cxgb/sge.c 			if (unlikely(e->Offload))
e                1502 drivers/net/ethernet/chelsio/cxgb/sge.c 				sge_rx(sge, fl, e->BufferLength);
e                1520 drivers/net/ethernet/chelsio/cxgb/sge.c 		e++;
e                1524 drivers/net/ethernet/chelsio/cxgb/sge.c 			e = q->entries;
e                1526 drivers/net/ethernet/chelsio/cxgb/sge.c 		prefetch(e);
e                1543 drivers/net/ethernet/chelsio/cxgb/sge.c 	const struct respQ_e *e = &Q->entries[Q->cidx];
e                1545 drivers/net/ethernet/chelsio/cxgb/sge.c 	return e->GenerationBit == Q->genbit;
e                1560 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct respQ_e *e = &q->entries[q->cidx];
e                1561 drivers/net/ethernet/chelsio/cxgb/sge.c 	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
e                1566 drivers/net/ethernet/chelsio/cxgb/sge.c 	if (e->DataValid)
e                1570 drivers/net/ethernet/chelsio/cxgb/sge.c 		flags |= e->Qsleeping;
e                1572 drivers/net/ethernet/chelsio/cxgb/sge.c 		cmdq_processed[0] += e->Cmdq0CreditReturn;
e                1573 drivers/net/ethernet/chelsio/cxgb/sge.c 		cmdq_processed[1] += e->Cmdq1CreditReturn;
e                1575 drivers/net/ethernet/chelsio/cxgb/sge.c 		e++;
e                1579 drivers/net/ethernet/chelsio/cxgb/sge.c 			e = q->entries;
e                1581 drivers/net/ethernet/chelsio/cxgb/sge.c 		prefetch(e);
e                1588 drivers/net/ethernet/chelsio/cxgb/sge.c 	} while (e->GenerationBit == q->genbit && !e->DataValid);
e                1593 drivers/net/ethernet/chelsio/cxgb/sge.c 	return e->GenerationBit == q->genbit;
e                1762 drivers/net/ethernet/chelsio/cxgb/sge.c 	const struct ethhdr *e = data;
e                1764 drivers/net/ethernet/chelsio/cxgb/sge.c 	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
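
The respQ processing loops above rely on a generation bit rather than a shared producer index: an entry is valid only while its GenerationBit matches the queue's expected genbit, and the consumer flips that expectation each time cidx wraps, so leftover entries from the previous lap are never mistaken for fresh ones. A minimal sketch of the idiom, with toy types in place of respQ_e (assumption: single consumer, and the producer writes each slot's bit with the current lap's generation):

	#include <stdbool.h>

	struct sketch_entry { unsigned int gen : 1; int payload; };

	struct sketch_queue {
		struct sketch_entry *entries;
		unsigned int size, cidx;
		unsigned int genbit;	/* generation expected of the next valid entry */
	};

	/* Consume one entry if the producer has written it; true on success. */
	static bool sketch_poll(struct sketch_queue *q, int *out)
	{
		struct sketch_entry *e = &q->entries[q->cidx];

		if (e->gen != q->genbit)
			return false;		/* slot not yet (re)written this lap */
		*out = e->payload;
		if (++q->cidx == q->size) {	/* wrap: next lap uses the other bit */
			q->cidx = 0;
			q->genbit ^= 1;
		}
		return true;
	}
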
e                  77 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	union listen_entry *e;
e                  82 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	e = stid2entry(t, tid);
e                  83 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if ((void *)e->next >= (void *)t->tid_tab &&
e                  84 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	    (void *)e->next < (void *)&t->atid_tab[t->natids])
e                  87 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	return &e->t3c_tid;
e                  96 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	union active_open_entry *e;
e                 101 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	e = atid2entry(t, tid);
e                 102 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	if ((void *)e->next >= (void *)t->tid_tab &&
e                 103 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	    (void *)e->next < (void *)&t->atid_tab[t->natids])
e                 106 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h 	return &e->t3c_tid;
e                1953 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                1959 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->rx_max_pending = MAX_RX_BUFFERS;
e                1960 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e                1961 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->tx_max_pending = MAX_TXQ_ENTRIES;
e                1963 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->rx_pending = q->fl_size;
e                1964 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->rx_mini_pending = q->rspq_size;
e                1965 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->rx_jumbo_pending = q->jumbo_size;
e                1966 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->tx_pending = q->txq_size[0];
e                1969 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                1976 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (e->rx_pending > MAX_RX_BUFFERS ||
e                1977 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
e                1978 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->tx_pending > MAX_TXQ_ENTRIES ||
e                1979 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
e                1980 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
e                1981 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->rx_pending < MIN_FL_ENTRIES ||
e                1982 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
e                1983 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
e                1991 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->rspq_size = e->rx_mini_pending;
e                1992 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->fl_size = e->rx_pending;
e                1993 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->jumbo_size = e->rx_jumbo_pending;
e                1994 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->txq_size[0] = e->tx_pending;
e                1995 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->txq_size[1] = e->tx_pending;
e                1996 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		q->txq_size[2] = e->tx_pending;
e                2032 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
e                2043 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	e->magic = EEPROM_MAGIC;
e                2044 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
e                2048 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		memcpy(data, buf + e->offset, e->len);
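
Both get_eeprom implementations above use the same bounce-buffer trick: the EEPROM is only readable in aligned 32-bit words, so the loop rounds e->offset down to a word boundary, reads whole words into a scratch buffer indexed by absolute offset, and finally memcpy()s exactly the byte window the caller asked for. A self-contained sketch under that assumption (sketch_read_word and the fake backing store are stand-ins for the driver's word accessor; buf is a caller-provided, 4-byte-aligned scratch area covering the device):

	#include <stdint.h>
	#include <string.h>

	static uint32_t fake_eeprom[64];	/* toy backing store for the sketch */

	/* Word-granular reader standing in for the real EEPROM accessor. */
	static int sketch_read_word(unsigned int addr, uint32_t *val)
	{
		if (addr / 4 >= 64)
			return -1;
		*val = fake_eeprom[addr / 4];
		return 0;
	}

	/* Serve an arbitrary byte range from a word-addressable device. */
	static int sketch_get_eeprom(uint8_t *buf, unsigned int offset,
				     unsigned int len, uint8_t *data)
	{
		unsigned int i;
		int err = 0;

		/* Over-read from the aligned boundary below offset ... */
		for (i = offset & ~3u; !err && i < offset + len; i += 4)
			err = sketch_read_word(i, (uint32_t *)&buf[i]);
		/* ... then copy out only the window the caller requested. */
		if (!err)
			memcpy(data, buf + offset, len);
		return err;
	}
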
e                 461 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		struct ch_embedded_info *e = data;
e                 464 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t3_get_fw_version(adapter, &e->fw_vers);
e                 465 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		t3_get_tp_version(adapter, &e->tp_vers);
e                1088 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
e                1106 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
e                1119 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	struct l2t_entry *e;
e                1130 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	e = t3_l2t_get(tdev, new, dev, daddr);
e                1131 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	if (!e) {
e                1142 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 			update_tcb = te->client->redirect(te->ctx, old, new, e);
e                1145 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 				l2t_hold(L2DATA(tdev), e);
e                1147 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 				set_l2t_ix(tdev, tid, e);
e                1151 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 	l2t_release(tdev, e);
e                  63 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline unsigned int vlan_prio(const struct l2t_entry *e)
e                  65 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	return e->vlan >> 13;
e                  74 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
e                  77 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (e->neigh)
e                  78 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_release(e->neigh);
e                  79 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	e->neigh = n;
e                  88 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				  struct l2t_entry *e)
e                 101 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
e                 102 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
e                 103 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
e                 104 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			    V_L2T_W_PRIO(vlan_prio(e)));
e                 105 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
e                 106 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
e                 110 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	skb_queue_walk_safe(&e->arpq, skb, tmp) {
e                 111 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		__skb_unlink(skb, &e->arpq);
e                 114 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	e->state = L2T_STATE_VALID;
e                 123 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
e                 125 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	__skb_queue_tail(&e->arpq, skb);
e                 129 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		     struct l2t_entry *e)
e                 132 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	switch (e->state) {
e                 134 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_event_send(e->neigh, NULL);
e                 135 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_lock_bh(&e->lock);
e                 136 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->state == L2T_STATE_STALE)
e                 137 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->state = L2T_STATE_VALID;
e                 138 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_unlock_bh(&e->lock);
e                 143 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_lock_bh(&e->lock);
e                 144 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->state != L2T_STATE_RESOLVING) {
e                 146 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			spin_unlock_bh(&e->lock);
e                 149 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		arpq_enqueue(e, skb);
e                 150 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_unlock_bh(&e->lock);
e                 160 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (!neigh_event_send(e->neigh, NULL)) {
e                 166 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			spin_lock_bh(&e->lock);
e                 167 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (!skb_queue_empty(&e->arpq))
e                 168 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				setup_l2e_send_pending(dev, skb, e);
e                 171 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			spin_unlock_bh(&e->lock);
e                 179 drivers/net/ethernet/chelsio/cxgb3/l2t.c void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
e                 182 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	switch (e->state) {
e                 184 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_event_send(e->neigh, NULL);
e                 185 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_lock_bh(&e->lock);
e                 186 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->state == L2T_STATE_STALE) {
e                 187 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->state = L2T_STATE_VALID;
e                 189 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_unlock_bh(&e->lock);
e                 194 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_lock_bh(&e->lock);
e                 195 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->state != L2T_STATE_RESOLVING) {
e                 197 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			spin_unlock_bh(&e->lock);
e                 200 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_unlock_bh(&e->lock);
e                 210 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_event_send(e->neigh, NULL);
e                 221 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct l2t_entry *end, *e, **p;
e                 227 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
e                 228 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (atomic_read(&e->refcnt) == 0)
e                 231 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
e                 233 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	d->rover = e + 1;
e                 240 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (e->state != L2T_STATE_UNUSED) {
e                 241 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		int hash = arp_hash(e->addr, e->ifindex, d);
e                 244 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (*p == e) {
e                 245 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				*p = e->next;
e                 248 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->state = L2T_STATE_UNUSED;
e                 250 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	return e;
e                 264 drivers/net/ethernet/chelsio/cxgb3/l2t.c void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
e                 266 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	spin_lock_bh(&e->lock);
e                 267 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
e                 268 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->neigh) {
e                 269 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			neigh_release(e->neigh);
e                 270 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->neigh = NULL;
e                 273 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	spin_unlock_bh(&e->lock);
e                 283 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
e                 287 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
e                 289 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (neigh != e->neigh)
e                 290 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_replace(e, neigh);
e                 292 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
e                 294 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->state = L2T_STATE_RESOLVING;
e                 296 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->state = L2T_STATE_VALID;
e                 298 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->state = L2T_STATE_STALE;
e                 299 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	spin_unlock(&e->lock);
e                 305 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct l2t_entry *e = NULL;
e                 334 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	for (e = d->l2tab[hash].first; e; e = e->next)
e                 335 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->addr == addr && e->ifindex == ifidx &&
e                 336 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		    e->smt_idx == smt_idx) {
e                 337 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			l2t_hold(d, e);
e                 338 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (atomic_read(&e->refcnt) == 1)
e                 339 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				reuse_entry(e, neigh);
e                 344 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	e = alloc_l2e(d);
e                 345 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (e) {
e                 346 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
e                 347 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->next = d->l2tab[hash].first;
e                 348 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		d->l2tab[hash].first = e;
e                 349 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->state = L2T_STATE_RESOLVING;
e                 350 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->addr = addr;
e                 351 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->ifindex = ifidx;
e                 352 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		e->smt_idx = smt_idx;
e                 353 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		atomic_set(&e->refcnt, 1);
e                 354 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		neigh_replace(e, neigh);
e                 356 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->vlan = vlan_dev_vlan_id(neigh->dev);
e                 358 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->vlan = VLAN_NONE;
e                 359 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		spin_unlock(&e->lock);
e                 367 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	return e;
e                 402 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	struct l2t_entry *e;
e                 409 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	for (e = d->l2tab[hash].first; e; e = e->next)
e                 410 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->addr == addr && e->ifindex == ifidx) {
e                 411 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			spin_lock(&e->lock);
e                 421 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	if (atomic_read(&e->refcnt)) {
e                 422 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (neigh != e->neigh)
e                 423 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			neigh_replace(e, neigh);
e                 425 drivers/net/ethernet/chelsio/cxgb3/l2t.c 		if (e->state == L2T_STATE_RESOLVING) {
e                 427 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				skb_queue_splice_init(&e->arpq, &arpq);
e                 429 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				setup_l2e_send_pending(dev, NULL, e);
e                 431 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			e->state = neigh->nud_state & NUD_CONNECTED ?
e                 433 drivers/net/ethernet/chelsio/cxgb3/l2t.c 			if (!ether_addr_equal(e->dmac, neigh->ha))
e                 434 drivers/net/ethernet/chelsio/cxgb3/l2t.c 				setup_l2e_send_pending(dev, NULL, e);
e                 437 drivers/net/ethernet/chelsio/cxgb3/l2t.c 	spin_unlock_bh(&e->lock);
e                 110 drivers/net/ethernet/chelsio/cxgb3/l2t.h void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
e                 115 drivers/net/ethernet/chelsio/cxgb3/l2t.h 		     struct l2t_entry *e);
e                 116 drivers/net/ethernet/chelsio/cxgb3/l2t.h void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
e                 122 drivers/net/ethernet/chelsio/cxgb3/l2t.h 			   struct l2t_entry *e)
e                 124 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	if (likely(e->state == L2T_STATE_VALID))
e                 126 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	return t3_l2t_send_slow(dev, skb, e);
e                 129 drivers/net/ethernet/chelsio/cxgb3/l2t.h static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
e                 136 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	if (atomic_dec_and_test(&e->refcnt) && d)
e                 137 drivers/net/ethernet/chelsio/cxgb3/l2t.h 		t3_l2e_free(d, e);
e                 142 drivers/net/ethernet/chelsio/cxgb3/l2t.h static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
e                 144 drivers/net/ethernet/chelsio/cxgb3/l2t.h 	if (d && atomic_add_return(1, &e->refcnt) == 1)	/* 0 -> 1 transition */
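
The l2t_hold/l2t_release pair above encodes the entry lifecycle in two refcount transitions: the 0 -> 1 transition in hold takes the entry out of the free pool (the excerpt's own comment says as much), and the dec-and-test in release hands it to t3_l2e_free once the last user drops it. A self-contained sketch of just those two transitions using C11 atomics (sketch_* names are illustrative; the kernel uses atomic_t and its own free path):

	#include <stdatomic.h>

	struct sketch_l2e { atomic_int refcnt; };
	struct sketch_l2t { atomic_int nfree; };

	static void sketch_hold(struct sketch_l2t *d, struct sketch_l2e *e)
	{
		/* fetch_add returns the old value: 0 -> 1 leaves the free pool */
		if (atomic_fetch_add(&e->refcnt, 1) == 0)
			atomic_fetch_sub(&d->nfree, 1);
	}

	static void sketch_release(struct sketch_l2t *d, struct sketch_l2e *e)
	{
		/* old value 1 means this was the last user: 1 -> 0, reclaimable */
		if (atomic_fetch_sub(&e->refcnt, 1) == 1)
			atomic_fetch_add(&d->nfree, 1);
	}

Keeping both counters on the same transitions is what lets the allocators above find a victim by scanning for refcnt == 0 without any separate free list.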
e                 274 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e)
e                 277 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h 	       ((u8 *)e + sizeof(*e) + e->data_size);
e                 352 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c 		const struct cxgb4_collect_entity *e = &e_arr[i];
e                 354 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c 		entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
e                 355 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c 		entity_hdr->entity_type = e->entity;
e                 358 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c 		ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
e                1001 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		struct fw_devlog_e *e;
e                1009 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		e = &dinfo->log[index];
e                1010 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		if (e->timestamp == 0)
e                1018 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->seqno),
e                1019 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be64_to_cpu(e->timestamp),
e                1020 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (e->level < ARRAY_SIZE(devlog_level_strings)
e                1021 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			    ? devlog_level_strings[e->level]
e                1023 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   (e->facility < ARRAY_SIZE(devlog_facility_strings)
e                1024 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			    ? devlog_facility_strings[e->facility]
e                1026 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		seq_printf(seq, e->fmt,
e                1027 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[0]),
e                1028 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[1]),
e                1029 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[2]),
e                1030 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[3]),
e                1031 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[4]),
e                1032 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[5]),
e                1033 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[6]),
e                1034 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			   be32_to_cpu(e->params[7]));
e                1120 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		struct fw_devlog_e *e = &dinfo->log[index];
e                1123 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		if (e->timestamp == 0)
e                1126 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 		seqno = be32_to_cpu(e->seqno);
e                 823 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                 828 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_max_pending = MAX_RX_BUFFERS;
e                 829 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
e                 830 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_jumbo_max_pending = 0;
e                 831 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->tx_max_pending = MAX_TXQ_ENTRIES;
e                 833 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
e                 834 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
e                 835 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->rx_jumbo_pending = 0;
e                 836 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
e                 839 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e                 846 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
e                 847 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	    e->tx_pending > MAX_TXQ_ENTRIES ||
e                 848 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
e                 849 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
e                 850 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
e                 857 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
e                 858 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
e                 859 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
e                1133 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
e                1143 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	e->magic = EEPROM_MAGIC;
e                1144 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
e                1148 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		memcpy(data, buf + e->offset, e->len);
e                3059 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	struct sched_class *e;
e                3120 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	e = cxgb4_sched_class_alloc(dev, &p);
e                3121 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	if (!e)
e                3127 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	qe.class = e->idx;
e                  65 drivers/net/ethernet/chelsio/cxgb4/l2t.c static inline unsigned int vlan_prio(const struct l2t_entry *e)
e                  67 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e->vlan >> VLAN_PRIO_SHIFT;
e                  70 drivers/net/ethernet/chelsio/cxgb4/l2t.c static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
e                  72 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
e                 118 drivers/net/ethernet/chelsio/cxgb4/l2t.c static int addreq(const struct l2t_entry *e, const u32 *addr)
e                 120 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->v6)
e                 121 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
e                 122 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
e                 123 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e->addr[0] ^ addr[0];
e                 126 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
e                 129 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->neigh)
e                 130 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		neigh_release(e->neigh);
e                 131 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	e->neigh = n;
e                 138 drivers/net/ethernet/chelsio/cxgb4/l2t.c static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
e                 141 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	unsigned int l2t_idx = e->idx + d->l2t_start;
e                 155 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
e                 157 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	req->vlan = htons(e->vlan);
e                 158 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
e                 159 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
e                 160 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
e                 164 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (sync && e->state != L2T_STATE_SWITCHING)
e                 165 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = L2T_STATE_SYNC_WRITE;
e                 173 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void send_pending(struct adapter *adap, struct l2t_entry *e)
e                 177 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	while ((skb = __skb_dequeue(&e->arpq)) != NULL)
e                 200 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
e                 202 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock(&e->lock);
e                 203 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->state != L2T_STATE_SWITCHING) {
e                 204 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			send_pending(adap, e);
e                 205 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->state = (e->neigh->nud_state & NUD_STALE) ?
e                 208 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock(&e->lock);
e                 216 drivers/net/ethernet/chelsio/cxgb4/l2t.c static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
e                 218 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	__skb_queue_tail(&e->arpq, skb);
e                 222 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		   struct l2t_entry *e)
e                 227 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	switch (e->state) {
e                 229 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		neigh_event_send(e->neigh, NULL);
e                 230 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock_bh(&e->lock);
e                 231 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->state == L2T_STATE_STALE)
e                 232 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->state = L2T_STATE_VALID;
e                 233 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock_bh(&e->lock);
e                 239 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock_bh(&e->lock);
e                 240 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->state != L2T_STATE_SYNC_WRITE &&
e                 241 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		    e->state != L2T_STATE_RESOLVING) {
e                 242 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			spin_unlock_bh(&e->lock);
e                 245 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		arpq_enqueue(e, skb);
e                 246 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock_bh(&e->lock);
e                 248 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->state == L2T_STATE_RESOLVING &&
e                 249 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		    !neigh_event_send(e->neigh, NULL)) {
e                 250 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			spin_lock_bh(&e->lock);
e                 251 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (e->state == L2T_STATE_RESOLVING &&
e                 252 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			    !skb_queue_empty(&e->arpq))
e                 253 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				write_l2e(adap, e, 1);
e                 254 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			spin_unlock_bh(&e->lock);
e                 266 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *end, *e, **p;
e                 272 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
e                 273 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (atomic_read(&e->refcnt) == 0)
e                 276 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
e                 279 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	d->rover = e + 1;
e                 286 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->state < L2T_STATE_SWITCHING)
e                 287 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
e                 288 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (*p == e) {
e                 289 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				*p = e->next;
e                 290 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				e->next = NULL;
e                 294 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	e->state = L2T_STATE_UNUSED;
e                 295 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e;
e                 301 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *end, *e, **p;
e                 304 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
e                 305 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (atomic_read(&e->refcnt) == 0) {
e                 307 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				first_free = e;
e                 309 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (e->state == L2T_STATE_SWITCHING) {
e                 310 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				if (ether_addr_equal(e->dmac, dmac) &&
e                 311 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				    (e->vlan == vlan) && (e->lport == port))
e                 318 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e = first_free;
e                 328 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->state < L2T_STATE_SWITCHING)
e                 329 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
e                 330 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (*p == e) {
e                 331 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				*p = e->next;
e                 332 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				e->next = NULL;
e                 335 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	e->state = L2T_STATE_UNUSED;
e                 338 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e;
e                 351 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void _t4_l2e_free(struct l2t_entry *e)
e                 356 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
e                 357 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->neigh) {
e                 358 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			neigh_release(e->neigh);
e                 359 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->neigh = NULL;
e                 361 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
e                 365 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	d = container_of(e, struct l2t_data, l2tab[e->idx]);
e                 370 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void t4_l2e_free(struct l2t_entry *e)
e                 375 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	spin_lock_bh(&e->lock);
e                 376 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
e                 377 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->neigh) {
e                 378 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			neigh_release(e->neigh);
e                 379 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->neigh = NULL;
e                 381 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
e                 384 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	spin_unlock_bh(&e->lock);
e                 386 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	d = container_of(e, struct l2t_data, l2tab[e->idx]);
e                 390 drivers/net/ethernet/chelsio/cxgb4/l2t.c void cxgb4_l2t_release(struct l2t_entry *e)
e                 392 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (atomic_dec_and_test(&e->refcnt))
e                 393 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		t4_l2e_free(e);
e                 401 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
e                 405 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
e                 406 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (neigh != e->neigh)
e                 407 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		neigh_replace(e, neigh);
e                 409 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
e                 411 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = L2T_STATE_RESOLVING;
e                 413 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = L2T_STATE_VALID;
e                 415 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = L2T_STATE_STALE;
e                 416 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	spin_unlock(&e->lock);
e                 425 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *e;
e                 444 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	for (e = d->l2tab[hash].first; e; e = e->next)
e                 445 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (!addreq(e, addr) && e->ifindex == ifidx &&
e                 446 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		    e->vlan == vlan && e->lport == lport) {
e                 447 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			l2t_hold(d, e);
e                 448 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (atomic_read(&e->refcnt) == 1)
e                 449 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				reuse_entry(e, neigh);
e                 454 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	e = alloc_l2e(d);
e                 455 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e) {
e                 456 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
e                 457 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = L2T_STATE_RESOLVING;
e                 459 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
e                 460 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		memcpy(e->addr, addr, addr_len);
e                 461 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->ifindex = ifidx;
e                 462 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->hash = hash;
e                 463 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->lport = lport;
e                 464 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->v6 = addr_len == 16;
e                 465 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		atomic_set(&e->refcnt, 1);
e                 466 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		neigh_replace(e, neigh);
e                 467 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->vlan = vlan;
e                 468 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->next = d->l2tab[hash].first;
e                 469 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		d->l2tab[hash].first = e;
e                 470 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock(&e->lock);
e                 474 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e;
e                 514 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
e                 518 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
e                 521 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock(&e->lock);
e                 526 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock(&e->lock);
e                 536 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *e;
e                 545 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	for (e = d->l2tab[hash].first; e; e = e->next)
e                 546 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (!addreq(e, addr) && e->ifindex == ifidx) {
e                 547 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			spin_lock(&e->lock);
e                 548 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			if (atomic_read(&e->refcnt))
e                 550 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			spin_unlock(&e->lock);
e                 559 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (neigh != e->neigh)
e                 560 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		neigh_replace(e, neigh);
e                 562 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e->state == L2T_STATE_RESOLVING) {
e                 564 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			arpq = &e->arpq;
e                 566 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			   !skb_queue_empty(&e->arpq)) {
e                 567 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			write_l2e(adap, e, 1);
e                 570 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		e->state = neigh->nud_state & NUD_CONNECTED ?
e                 572 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
e                 573 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			write_l2e(adap, e, 0);
e                 577 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		handle_failed_resolution(adap, e);
e                 578 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	spin_unlock_bh(&e->lock);
e                 589 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	struct l2t_entry *e;
e                 593 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
e                 594 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	if (e) {
e                 595 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
e                 596 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (!atomic_read(&e->refcnt)) {
e                 597 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->state = L2T_STATE_SWITCHING;
e                 598 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->vlan = vlan;
e                 599 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			e->lport = port;
e                 600 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			ether_addr_copy(e->dmac, eth_addr);
e                 601 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			atomic_set(&e->refcnt, 1);
e                 602 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			ret = write_l2e(adap, e, 0);
e                 604 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				_t4_l2e_free(e);
e                 605 drivers/net/ethernet/chelsio/cxgb4/l2t.c 				spin_unlock(&e->lock);
e                 610 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			atomic_inc(&e->refcnt);
e                 613 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock(&e->lock);
e                 616 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	return e;
e                 693 drivers/net/ethernet/chelsio/cxgb4/l2t.c static char l2e_state(const struct l2t_entry *e)
e                 695 drivers/net/ethernet/chelsio/cxgb4/l2t.c 	switch (e->state) {
e                 700 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		return skb_queue_empty(&e->arpq) ? 'R' : 'A';
e                 715 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		struct l2t_entry *e = v;
e                 717 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_lock_bh(&e->lock);
e                 718 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		if (e->state == L2T_STATE_SWITCHING)
e                 721 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
e                 723 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			   e->idx + d->l2t_start, ip, e->dmac,
e                 724 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
e                 725 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			   l2e_state(e), atomic_read(&e->refcnt),
e                 726 drivers/net/ethernet/chelsio/cxgb4/l2t.c 			   e->neigh ? e->neigh->dev->name : "");
e                 727 drivers/net/ethernet/chelsio/cxgb4/l2t.c 		spin_unlock_bh(&e->lock);
e                 110 drivers/net/ethernet/chelsio/cxgb4/l2t.h void cxgb4_l2t_release(struct l2t_entry *e);
e                 112 drivers/net/ethernet/chelsio/cxgb4/l2t.h 		   struct l2t_entry *e);
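
A recurring pattern in both l2t.c files above: while an entry is in L2T_STATE_RESOLVING, outgoing packets are parked on its per-entry arpq (arpq_enqueue), and once neighbour resolution and the synchronous L2T write complete, the reply handler drains the queue in arrival order (send_pending / setup_l2e_send_pending). A minimal sketch of that park-then-drain flow with a toy singly linked queue in place of the sk_buff list (all names hypothetical):

	#include <stddef.h>

	/* Toy packet and per-entry ARP queue; stand-ins for sk_buff/arpq. */
	struct sketch_pkt { struct sketch_pkt *next; };
	struct sketch_l2e {
		struct sketch_pkt *arpq_head, *arpq_tail;
		int resolved;
	};

	/* Park a packet until the entry's MAC is known. */
	static void sketch_arpq_enqueue(struct sketch_l2e *e, struct sketch_pkt *p)
	{
		p->next = NULL;
		if (e->arpq_tail)
			e->arpq_tail->next = p;
		else
			e->arpq_head = p;
		e->arpq_tail = p;
	}

	/* Called once the L2T write / neighbour resolution completes. */
	static void sketch_send_pending(struct sketch_l2e *e,
					void (*tx)(struct sketch_pkt *))
	{
		struct sketch_pkt *p;

		while ((p = e->arpq_head) != NULL) {
			e->arpq_head = p->next;
			tx(p);		/* transmit in original arrival order */
		}
		e->arpq_tail = NULL;
		e->resolved = 1;
	}
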
e                  47 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e;
e                  50 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = &s->tab[p->u.params.class];
e                  57 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      p->u.params.channel, e->idx,
e                 113 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e, *end;
e                 119 drivers/net/ethernet/chelsio/cxgb4/sched.c 	for (e = &s->tab[0]; e != end; ++e) {
e                 123 drivers/net/ethernet/chelsio/cxgb4/sched.c 		if (e->state == SCHED_STATE_UNUSED)
e                 126 drivers/net/ethernet/chelsio/cxgb4/sched.c 		list_for_each_entry(qe, &e->queue_list, list) {
e                 128 drivers/net/ethernet/chelsio/cxgb4/sched.c 				found = e;
e                 146 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e;
e                 160 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = t4_sched_queue_lookup(pi, qid, &index);
e                 161 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (e && index >= 0) {
e                 164 drivers/net/ethernet/chelsio/cxgb4/sched.c 		list_for_each_entry(qe, &e->queue_list, list) {
e                 176 drivers/net/ethernet/chelsio/cxgb4/sched.c 		if (atomic_dec_and_test(&e->refcnt)) {
e                 177 drivers/net/ethernet/chelsio/cxgb4/sched.c 			e->state = SCHED_STATE_UNUSED;
e                 178 drivers/net/ethernet/chelsio/cxgb4/sched.c 			memset(&e->info, 0, sizeof(e->info));
e                 188 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e;
e                 213 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = &s->tab[qe->param.class];
e                 218 drivers/net/ethernet/chelsio/cxgb4/sched.c 	list_add_tail(&qe->list, &e->queue_list);
e                 219 drivers/net/ethernet/chelsio/cxgb4/sched.c 	atomic_inc(&e->refcnt);
e                 228 drivers/net/ethernet/chelsio/cxgb4/sched.c 				      struct sched_class *e,
e                 231 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (!e)
e                 238 drivers/net/ethernet/chelsio/cxgb4/sched.c 		list_for_each_entry(qe, &e->queue_list, list)
e                 358 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e, *end;
e                 364 drivers/net/ethernet/chelsio/cxgb4/sched.c 		for (e = &s->tab[0]; e != end; ++e) {
e                 365 drivers/net/ethernet/chelsio/cxgb4/sched.c 			if (e->state == SCHED_STATE_UNUSED) {
e                 366 drivers/net/ethernet/chelsio/cxgb4/sched.c 				found = e;
e                 380 drivers/net/ethernet/chelsio/cxgb4/sched.c 		for (e = &s->tab[0]; e != end; ++e) {
e                 381 drivers/net/ethernet/chelsio/cxgb4/sched.c 			if (e->state == SCHED_STATE_UNUSED)
e                 384 drivers/net/ethernet/chelsio/cxgb4/sched.c 			memcpy(&info, &e->info, sizeof(info));
e                 391 drivers/net/ethernet/chelsio/cxgb4/sched.c 				found = e;
e                 403 drivers/net/ethernet/chelsio/cxgb4/sched.c 	struct sched_class *e;
e                 421 drivers/net/ethernet/chelsio/cxgb4/sched.c 	e = t4_sched_class_lookup(pi, p);
e                 422 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (!e) {
e                 426 drivers/net/ethernet/chelsio/cxgb4/sched.c 		e = t4_sched_class_lookup(pi, NULL);
e                 427 drivers/net/ethernet/chelsio/cxgb4/sched.c 		if (!e)
e                 431 drivers/net/ethernet/chelsio/cxgb4/sched.c 		np.u.params.class = e->idx;
e                 436 drivers/net/ethernet/chelsio/cxgb4/sched.c 		memcpy(&e->info, &np, sizeof(e->info));
e                 437 drivers/net/ethernet/chelsio/cxgb4/sched.c 		atomic_set(&e->refcnt, 0);
e                 438 drivers/net/ethernet/chelsio/cxgb4/sched.c 		e->state = SCHED_STATE_ACTIVE;
e                 441 drivers/net/ethernet/chelsio/cxgb4/sched.c 	return e;
e                 470 drivers/net/ethernet/chelsio/cxgb4/sched.c static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
e                 472 drivers/net/ethernet/chelsio/cxgb4/sched.c 	t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
e                 509 drivers/net/ethernet/chelsio/cxgb4/sched.c 			struct sched_class *e;
e                 511 drivers/net/ethernet/chelsio/cxgb4/sched.c 			e = &s->tab[i];
e                 512 drivers/net/ethernet/chelsio/cxgb4/sched.c 			if (e->state == SCHED_STATE_ACTIVE)
e                 513 drivers/net/ethernet/chelsio/cxgb4/sched.c 				t4_sched_class_free(pi, e);
e                  68 drivers/net/ethernet/chelsio/cxgb4/smt.c 	struct smt_entry *e, *end;
e                  70 drivers/net/ethernet/chelsio/cxgb4/smt.c 	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
e                  71 drivers/net/ethernet/chelsio/cxgb4/smt.c 		if (e->refcnt == 0) {
e                  73 drivers/net/ethernet/chelsio/cxgb4/smt.c 				first_free = e;
e                  75 drivers/net/ethernet/chelsio/cxgb4/smt.c 			if (e->state == SMT_STATE_SWITCHING) {
e                  79 drivers/net/ethernet/chelsio/cxgb4/smt.c 				if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
e                  86 drivers/net/ethernet/chelsio/cxgb4/smt.c 		e = first_free;
e                  92 drivers/net/ethernet/chelsio/cxgb4/smt.c 	e->state = SMT_STATE_UNUSED;
e                  95 drivers/net/ethernet/chelsio/cxgb4/smt.c 	return e;
e                  98 drivers/net/ethernet/chelsio/cxgb4/smt.c static void t4_smte_free(struct smt_entry *e)
e                 100 drivers/net/ethernet/chelsio/cxgb4/smt.c 	if (e->refcnt == 0) {  /* hasn't been recycled */
e                 101 drivers/net/ethernet/chelsio/cxgb4/smt.c 		e->state = SMT_STATE_UNUSED;
e                 110 drivers/net/ethernet/chelsio/cxgb4/smt.c void cxgb4_smt_release(struct smt_entry *e)
e                 112 drivers/net/ethernet/chelsio/cxgb4/smt.c 	spin_lock_bh(&e->lock);
e                 113 drivers/net/ethernet/chelsio/cxgb4/smt.c 	if ((--e->refcnt) == 0)
e                 114 drivers/net/ethernet/chelsio/cxgb4/smt.c 		t4_smte_free(e);
e                 115 drivers/net/ethernet/chelsio/cxgb4/smt.c 	spin_unlock_bh(&e->lock);
e                 125 drivers/net/ethernet/chelsio/cxgb4/smt.c 		struct smt_entry *e = &s->smtab[smtidx];
e                 130 drivers/net/ethernet/chelsio/cxgb4/smt.c 		spin_lock(&e->lock);
e                 131 drivers/net/ethernet/chelsio/cxgb4/smt.c 		e->state = SMT_STATE_ERROR;
e                 132 drivers/net/ethernet/chelsio/cxgb4/smt.c 		spin_unlock(&e->lock);
e                 137 drivers/net/ethernet/chelsio/cxgb4/smt.c static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
e                 160 drivers/net/ethernet/chelsio/cxgb4/smt.c 		row = (e->idx >> 1);
e                 161 drivers/net/ethernet/chelsio/cxgb4/smt.c 		if (e->idx & 1) {
e                 163 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac1, e->src_mac, ETH_ALEN);
e                 169 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
e                 173 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac0, e->src_mac, ETH_ALEN);
e                 179 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
e                 194 drivers/net/ethernet/chelsio/cxgb4/smt.c 		memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
e                 195 drivers/net/ethernet/chelsio/cxgb4/smt.c 		row = e->idx;
e                 199 drivers/net/ethernet/chelsio/cxgb4/smt.c 		htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
e                 212 drivers/net/ethernet/chelsio/cxgb4/smt.c 	struct smt_entry *e;
e                 215 drivers/net/ethernet/chelsio/cxgb4/smt.c 	e = find_or_alloc_smte(s, smac);
e                 216 drivers/net/ethernet/chelsio/cxgb4/smt.c 	if (e) {
e                 217 drivers/net/ethernet/chelsio/cxgb4/smt.c 		spin_lock(&e->lock);
e                 218 drivers/net/ethernet/chelsio/cxgb4/smt.c 		if (!e->refcnt) {
e                 219 drivers/net/ethernet/chelsio/cxgb4/smt.c 			e->refcnt = 1;
e                 220 drivers/net/ethernet/chelsio/cxgb4/smt.c 			e->state = SMT_STATE_SWITCHING;
e                 221 drivers/net/ethernet/chelsio/cxgb4/smt.c 			e->pfvf = pfvf;
e                 222 drivers/net/ethernet/chelsio/cxgb4/smt.c 			memcpy(e->src_mac, smac, ETH_ALEN);
e                 223 drivers/net/ethernet/chelsio/cxgb4/smt.c 			write_smt_entry(adap, e);
e                 225 drivers/net/ethernet/chelsio/cxgb4/smt.c 			++e->refcnt;
e                 227 drivers/net/ethernet/chelsio/cxgb4/smt.c 		spin_unlock(&e->lock);
e                 230 drivers/net/ethernet/chelsio/cxgb4/smt.c 	return e;
e                  74 drivers/net/ethernet/chelsio/cxgb4/smt.h void cxgb4_smt_release(struct smt_entry *e);
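
find_or_alloc_smte above (like find_or_alloc_l2e in cxgb4/l2t.c) uses one scan discipline: walk the table once, remember the first slot with refcnt == 0 as a fallback, but prefer a free slot still programmed with the same key, so the hardware write can be skipped when an old entry is recycled. A sketch of that single-pass scan (sketch_* types are illustrative; the 'programmed' flag stands in for the SWITCHING-state check):

	#include <stddef.h>

	struct sketch_smte { int refcnt; int programmed; int key; };

	/* Among free slots, prefer one already programmed with this key;
	 * otherwise fall back to the first free slot seen. */
	static struct sketch_smte *sketch_find_or_alloc(struct sketch_smte *tab,
							size_t n, int key)
	{
		struct sketch_smte *first_free = NULL, *e;

		for (e = tab; e != tab + n; ++e) {
			if (e->refcnt != 0)
				continue;		/* in use elsewhere */
			if (!first_free)
				first_free = e;
			if (e->programmed && e->key == key)
				return e;		/* exact recycled match */
		}
		return first_free;			/* NULL when the table is full */
	}
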
e                 117 drivers/net/ethernet/chelsio/cxgb4/srq.c 	struct srq_entry *e;
e                 127 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e = s->entryp;
e                 128 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->valid = 1;
e                 129 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->idx = idx;
e                 130 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->pdid = SRQT_PDID_G(be64_to_cpu(rpl->rsvd_pdid));
e                 131 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->qlen = SRQT_QLEN_G(be32_to_cpu(rpl->qlen_qbase));
e                 132 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->qbase = SRQT_QBASE_G(be32_to_cpu(rpl->qlen_qbase));
e                 133 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->cur_msn = be16_to_cpu(rpl->cur_msn);
e                 134 drivers/net/ethernet/chelsio/cxgb4/srq.c 	e->max_msn = be16_to_cpu(rpl->max_msn);
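The srq.c lines above decode a firmware reply by byte-swapping each word once (be64_to_cpu/be32_to_cpu) and then masking subfields out with the SRQT_*_G() helpers. A small standalone sketch of that decode step, using glibc's be32toh() and field widths invented purely for illustration:

#include <stdint.h>
#include <endian.h>

/* hypothetical layout; the real SRQT_*_G() macros define the widths */
#define QLEN_G(x)       (((x) >> 20) & 0xfffu)
#define QBASE_G(x)      ((x) & 0xfffffu)

static void decode_qlen_qbase(uint32_t qlen_qbase_be,
                              uint32_t *qlen, uint32_t *qbase)
{
        uint32_t v = be32toh(qlen_qbase_be);    /* swap once... */

        *qlen  = QLEN_G(v);                     /* ...then pick subfields */
        *qbase = QBASE_G(v);
}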
e                 945 drivers/net/ethernet/intel/i40e/i40e_adminq.c 					     struct i40e_arq_event_info *e,
e                 958 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	memset(&e->desc, 0, sizeof(e->desc));
e                 993 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	e->desc = *desc;
e                 995 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	e->msg_len = min(datalen, e->buf_len);
e                 996 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	if (e->msg_buf != NULL && (e->msg_len != 0))
e                 997 drivers/net/ethernet/intel/i40e/i40e_adminq.c 		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e                 998 drivers/net/ethernet/intel/i40e/i40e_adminq.c 		       e->msg_len);
e                1001 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
e                1027 drivers/net/ethernet/intel/i40e/i40e_adminq.c 	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
e                8567 drivers/net/ethernet/intel/i40e/i40e_main.c 				  struct i40e_arq_event_info *e)
e                8570 drivers/net/ethernet/intel/i40e/i40e_main.c 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
e                8689 drivers/net/ethernet/intel/i40e/i40e_main.c 					   struct i40e_arq_event_info *e)
e                8692 drivers/net/ethernet/intel/i40e/i40e_main.c 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
e                9178 drivers/net/ethernet/intel/i40e/i40e_main.c 				   struct i40e_arq_event_info *e)
e                9181 drivers/net/ethernet/intel/i40e/i40e_main.c 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
e                  23 drivers/net/ethernet/intel/i40e/i40e_prototype.h 					     struct i40e_arq_event_info *e,
e                 846 drivers/net/ethernet/intel/iavf/iavf_adminq.c 					struct iavf_arq_event_info *e,
e                 859 drivers/net/ethernet/intel/iavf/iavf_adminq.c 	memset(&e->desc, 0, sizeof(e->desc));
e                 894 drivers/net/ethernet/intel/iavf/iavf_adminq.c 	e->desc = *desc;
e                 896 drivers/net/ethernet/intel/iavf/iavf_adminq.c 	e->msg_len = min(datalen, e->buf_len);
e                 897 drivers/net/ethernet/intel/iavf/iavf_adminq.c 	if (e->msg_buf && (e->msg_len != 0))
e                 898 drivers/net/ethernet/intel/iavf/iavf_adminq.c 		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e                 899 drivers/net/ethernet/intel/iavf/iavf_adminq.c 		       e->msg_len);
e                 902 drivers/net/ethernet/intel/iavf/iavf_adminq.c 	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
e                  23 drivers/net/ethernet/intel/iavf/iavf_prototype.h 					struct iavf_arq_event_info *e,
e                 251 drivers/net/ethernet/intel/ice/ice_adminq_cmd.h 	} e;
e                  27 drivers/net/ethernet/intel/ice/ice_common.h 		  struct ice_rq_event_info *e, u16 *pending);
e                1036 drivers/net/ethernet/intel/ice/ice_controlq.c 		  struct ice_rq_event_info *e, u16 *pending)
e                1048 drivers/net/ethernet/intel/ice/ice_controlq.c 	memset(&e->desc, 0, sizeof(e->desc));
e                1081 drivers/net/ethernet/intel/ice/ice_controlq.c 	memcpy(&e->desc, desc, sizeof(e->desc));
e                1083 drivers/net/ethernet/intel/ice/ice_controlq.c 	e->msg_len = min(datalen, e->buf_len);
e                1084 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (e->msg_buf && e->msg_len)
e                1085 drivers/net/ethernet/intel/ice/ice_controlq.c 		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
e                1089 drivers/net/ethernet/intel/ice/ice_controlq.c 	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
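The i40e/iavf/ice control-queue lines above all apply the same defensive copy: clamp the event's message length to the caller's buffer size, and only memcpy() when a buffer was actually supplied. A simplified stand-in (structs and names are not the drivers'):

#include <stdint.h>
#include <string.h>

struct rq_event_sketch {
        uint8_t  *msg_buf;      /* caller-supplied buffer, may be NULL */
        uint16_t  buf_len;      /* capacity of msg_buf */
        uint16_t  msg_len;      /* bytes actually delivered */
};

static void deliver_event(struct rq_event_sketch *e,
                          const void *ring_buf, uint16_t datalen)
{
        /* never copy more than the caller's buffer can hold */
        e->msg_len = datalen < e->buf_len ? datalen : e->buf_len;
        if (e->msg_buf && e->msg_len)
                memcpy(e->msg_buf, ring_buf, e->msg_len);
}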
e                1585 drivers/net/ethernet/intel/ice/ice_lib.c 	struct ice_fltr_list_entry *e, *tmp;
e                1587 drivers/net/ethernet/intel/ice/ice_lib.c 	list_for_each_entry_safe(e, tmp, h, list_entry) {
e                1588 drivers/net/ethernet/intel/ice/ice_lib.c 		list_del(&e->list_entry);
e                1589 drivers/net/ethernet/intel/ice/ice_lib.c 		devm_kfree(dev, e);
e                 511 drivers/net/ethernet/intel/ice/ice_switch.c 		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
e                 519 drivers/net/ethernet/intel/ice/ice_switch.c 		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
e                 912 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                 915 drivers/net/ethernet/marvell/skge.c 	ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
e                 919 drivers/net/ethernet/marvell/skge.c 	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e                 920 drivers/net/ethernet/marvell/skge.c 		e->desc = d;
e                 922 drivers/net/ethernet/marvell/skge.c 			e->next = ring->start;
e                 925 drivers/net/ethernet/marvell/skge.c 			e->next = e + 1;
e                 935 drivers/net/ethernet/marvell/skge.c static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
e                 938 drivers/net/ethernet/marvell/skge.c 	struct skge_rx_desc *rd = e->desc;
e                 949 drivers/net/ethernet/marvell/skge.c 	e->skb = skb;
e                 958 drivers/net/ethernet/marvell/skge.c 	dma_unmap_addr_set(e, mapaddr, map);
e                 959 drivers/net/ethernet/marvell/skge.c 	dma_unmap_len_set(e, maplen, bufsize);
e                 967 drivers/net/ethernet/marvell/skge.c static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
e                 969 drivers/net/ethernet/marvell/skge.c 	struct skge_rx_desc *rd = e->desc;
e                 985 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                 987 drivers/net/ethernet/marvell/skge.c 	e = ring->start;
e                 989 drivers/net/ethernet/marvell/skge.c 		struct skge_rx_desc *rd = e->desc;
e                 991 drivers/net/ethernet/marvell/skge.c 		if (e->skb) {
e                 993 drivers/net/ethernet/marvell/skge.c 					 dma_unmap_addr(e, mapaddr),
e                 994 drivers/net/ethernet/marvell/skge.c 					 dma_unmap_len(e, maplen),
e                 996 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(e->skb);
e                 997 drivers/net/ethernet/marvell/skge.c 			e->skb = NULL;
e                 999 drivers/net/ethernet/marvell/skge.c 	} while ((e = e->next) != ring->start);
e                1010 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                1012 drivers/net/ethernet/marvell/skge.c 	e = ring->start;
e                1022 drivers/net/ethernet/marvell/skge.c 		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
e                1026 drivers/net/ethernet/marvell/skge.c 	} while ((e = e->next) != ring->start);
e                2510 drivers/net/ethernet/marvell/skge.c 		      const struct skge_element *e)
e                2514 drivers/net/ethernet/marvell/skge.c 	u64 base = skge->dma + (e->desc - skge->mem);
e                2734 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                2746 drivers/net/ethernet/marvell/skge.c 	e = skge->tx_ring.to_use;
e                2747 drivers/net/ethernet/marvell/skge.c 	td = e->desc;
e                2749 drivers/net/ethernet/marvell/skge.c 	e->skb = skb;
e                2755 drivers/net/ethernet/marvell/skge.c 	dma_unmap_addr_set(e, mapaddr, map);
e                2756 drivers/net/ethernet/marvell/skge.c 	dma_unmap_len_set(e, maplen, len);
e                2793 drivers/net/ethernet/marvell/skge.c 			e = e->next;
e                2794 drivers/net/ethernet/marvell/skge.c 			e->skb = skb;
e                2795 drivers/net/ethernet/marvell/skge.c 			tf = e->desc;
e                2800 drivers/net/ethernet/marvell/skge.c 			dma_unmap_addr_set(e, mapaddr, map);
e                2801 drivers/net/ethernet/marvell/skge.c 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
e                2818 drivers/net/ethernet/marvell/skge.c 		     e - skge->tx_ring.start, skb->len);
e                2820 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_use = e->next;
e                2831 drivers/net/ethernet/marvell/skge.c 	e = skge->tx_ring.to_use;
e                2833 drivers/net/ethernet/marvell/skge.c 			 dma_unmap_addr(e, mapaddr),
e                2834 drivers/net/ethernet/marvell/skge.c 			 dma_unmap_len(e, maplen),
e                2837 drivers/net/ethernet/marvell/skge.c 		e = e->next;
e                2839 drivers/net/ethernet/marvell/skge.c 			       dma_unmap_addr(e, mapaddr),
e                2840 drivers/net/ethernet/marvell/skge.c 			       dma_unmap_len(e, maplen),
e                2853 drivers/net/ethernet/marvell/skge.c static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
e                2858 drivers/net/ethernet/marvell/skge.c 		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
e                2859 drivers/net/ethernet/marvell/skge.c 				 dma_unmap_len(e, maplen),
e                2862 drivers/net/ethernet/marvell/skge.c 		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
e                2863 drivers/net/ethernet/marvell/skge.c 			       dma_unmap_len(e, maplen),
e                2871 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                2873 drivers/net/ethernet/marvell/skge.c 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
e                2874 drivers/net/ethernet/marvell/skge.c 		struct skge_tx_desc *td = e->desc;
e                2876 drivers/net/ethernet/marvell/skge.c 		skge_tx_unmap(skge->hw->pdev, e, td->control);
e                2879 drivers/net/ethernet/marvell/skge.c 			dev_kfree_skb(e->skb);
e                2884 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_clean = e;
e                3046 drivers/net/ethernet/marvell/skge.c 				   struct skge_element *e,
e                3055 drivers/net/ethernet/marvell/skge.c 		     e - skge->rx_ring.start, status, len);
e                3075 drivers/net/ethernet/marvell/skge.c 					    dma_unmap_addr(e, mapaddr),
e                3076 drivers/net/ethernet/marvell/skge.c 					    dma_unmap_len(e, maplen),
e                3078 drivers/net/ethernet/marvell/skge.c 		skb_copy_from_linear_data(e->skb, skb->data, len);
e                3080 drivers/net/ethernet/marvell/skge.c 					       dma_unmap_addr(e, mapaddr),
e                3081 drivers/net/ethernet/marvell/skge.c 					       dma_unmap_len(e, maplen),
e                3083 drivers/net/ethernet/marvell/skge.c 		skge_rx_reuse(e, skge->rx_buf_size);
e                3092 drivers/net/ethernet/marvell/skge.c 		ee = *e;
e                3097 drivers/net/ethernet/marvell/skge.c 		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
e                3122 drivers/net/ethernet/marvell/skge.c 		     e - skge->rx_ring.start, control, status);
e                3141 drivers/net/ethernet/marvell/skge.c 	skge_rx_reuse(e, skge->rx_buf_size);
e                3150 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                3155 drivers/net/ethernet/marvell/skge.c 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
e                3156 drivers/net/ethernet/marvell/skge.c 		u32 control = ((const struct skge_tx_desc *) e->desc)->control;
e                3161 drivers/net/ethernet/marvell/skge.c 		skge_tx_unmap(skge->hw->pdev, e, control);
e                3166 drivers/net/ethernet/marvell/skge.c 				     e - skge->tx_ring.start);
e                3169 drivers/net/ethernet/marvell/skge.c 			bytes_compl += e->skb->len;
e                3171 drivers/net/ethernet/marvell/skge.c 			dev_consume_skb_any(e->skb);
e                3175 drivers/net/ethernet/marvell/skge.c 	skge->tx_ring.to_clean = e;
e                3198 drivers/net/ethernet/marvell/skge.c 	struct skge_element *e;
e                3205 drivers/net/ethernet/marvell/skge.c 	for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
e                3206 drivers/net/ethernet/marvell/skge.c 		struct skge_rx_desc *rd = e->desc;
e                3215 drivers/net/ethernet/marvell/skge.c 		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
e                3221 drivers/net/ethernet/marvell/skge.c 	ring->to_clean = e;
e                3693 drivers/net/ethernet/marvell/skge.c 	const struct skge_element *e;
e                3702 drivers/net/ethernet/marvell/skge.c 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
e                3703 drivers/net/ethernet/marvell/skge.c 		const struct skge_tx_desc *t = e->desc;
e                3710 drivers/net/ethernet/marvell/skge.c 	for (e = skge->rx_ring.to_clean; ; e = e->next) {
e                3711 drivers/net/ethernet/marvell/skge.c 		const struct skge_rx_desc *r = e->desc;
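The skge lines above build their descriptor rings as a closed circular list: each element's next points to its neighbour, the last wraps back to ring->start, and full traversals use the do/while-until-wrap idiom. A userspace sketch of that structure (names illustrative):

#include <stdlib.h>

struct elem {
        struct elem *next;
        void *desc;
};

struct ring {
        struct elem *start;
        int count;
};

static int ring_init(struct ring *ring, int count)
{
        struct elem *e;
        int i;

        ring->count = count;
        ring->start = calloc(count, sizeof(*e));
        if (!ring->start)
                return -1;

        /* close the loop: the last element points back at the first */
        for (i = 0, e = ring->start; i < count; i++, e++)
                e->next = (i == count - 1) ? ring->start : e + 1;
        return 0;
}

static void ring_visit_all(struct ring *ring, void (*fn)(struct elem *))
{
        struct elem *e = ring->start;

        do {
                fn(e);
        } while ((e = e->next) != ring->start);  /* stop on wrap-around */
}

The producer/consumer cursors (to_use/to_clean in the driver) then chase each other around this loop without any index arithmetic.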
e                 174 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 				      struct mlx5e_encap_entry *e)
e                 176 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (!e->tunnel) {
e                 181 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e);
e                 185 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 			     struct mlx5e_encap_entry *e,
e                 191 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ether_addr_copy(eth->h_dest, e->h_dest);
e                 210 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 				    struct mlx5e_encap_entry *e)
e                 213 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
e                 237 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		e->tunnel->calc_hlen(e);
e                 255 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->m_neigh.dev = n->dev;
e                 256 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->m_neigh.family = n->ops->family;
e                 257 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e                 258 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->out_dev = out_dev;
e                 259 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->route_dev = route_dev;
e                 266 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
e                 272 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ether_addr_copy(e->h_dest, n->ha);
e                 276 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
e                 289 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 					 &ip->protocol, e);
e                 293 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->encap_size = ipv4_encap_size;
e                 294 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->encap_header = encap_header;
e                 303 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e                 304 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 						     e->reformat_type,
e                 307 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (IS_ERR(e->pkt_reformat)) {
e                 308 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		err = PTR_ERR(e->pkt_reformat);
e                 312 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
e                 318 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
e                 329 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 				    struct mlx5e_encap_entry *e)
e                 332 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
e                 356 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		e->tunnel->calc_hlen(e);
e                 374 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->m_neigh.dev = n->dev;
e                 375 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->m_neigh.family = n->ops->family;
e                 376 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e                 377 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->out_dev = out_dev;
e                 378 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->route_dev = route_dev;
e                 385 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
e                 391 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ether_addr_copy(e->h_dest, n->ha);
e                 395 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
e                 407 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 					 &ip6h->nexthdr, e);
e                 411 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->encap_size = ipv6_encap_size;
e                 412 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->encap_header = encap_header;
e                 422 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e                 423 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 						     e->reformat_type,
e                 426 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (IS_ERR(e->pkt_reformat)) {
e                 427 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		err = PTR_ERR(e->pkt_reformat);
e                 431 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
e                 437 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
e                 459 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 				 struct mlx5e_encap_entry *e,
e                 465 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		e->reformat_type = -1;
e                 469 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	return tunnel->init_encap_attr(tunnel_dev, priv, e, extack);
e                  26 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 	int (*calc_hlen)(struct mlx5e_encap_entry *e);
e                  29 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 			       struct mlx5e_encap_entry *e,
e                  33 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 				   struct mlx5e_encap_entry *e);
e                  54 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 				 struct mlx5e_encap_entry *e,
e                  59 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 				    struct mlx5e_encap_entry *e);
e                  63 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h 				    struct mlx5e_encap_entry *e);
e                  15 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c static int mlx5e_tc_tun_calc_hlen_geneve(struct mlx5e_encap_entry *e)
e                  19 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	       e->tun_info->options_len;
e                  66 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 					       struct mlx5e_encap_entry *e,
e                  69 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	e->tunnel = &geneve_tunnel;
e                  75 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
e                  94 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 					     struct mlx5e_encap_entry *e)
e                  96 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	const struct ip_tunnel_info *tun_info = e->tun_info;
e                  12 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c static int mlx5e_tc_tun_calc_hlen_gretap(struct mlx5e_encap_entry *e)
e                  14 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	return gre_calc_hlen(e->tun_info->key.tun_flags);
e                  19 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 					       struct mlx5e_encap_entry *e,
e                  22 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	e->tunnel = &gre_tunnel;
e                  23 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
e                  29 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 					     struct mlx5e_encap_entry *e)
e                  31 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	const struct ip_tunnel_key *tun_key  = &e->tun_info->key;
e                  45 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	hdr_len	= mlx5e_tc_tun_calc_hlen_gretap(e);
e                  13 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
e                  62 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 					      struct mlx5e_encap_entry *e,
e                  65 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
e                  67 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	e->tunnel = &vxlan_tunnel;
e                  78 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
e                  84 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 					    struct mlx5e_encap_entry *e)
e                  86 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
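The tc_tun lines above dispatch through a per-tunnel-type ops structure: vxlan, geneve and gre each supply calc_hlen(), init_encap_attr() and a header generator, and the generic code refuses to offload when no ops are attached (the !e->tunnel check). A compressed sketch of that vtable shape, with simplified types:

struct encap_sketch;                    /* opaque for this sketch */

struct tunnel_ops_sketch {
        int (*calc_hlen)(struct encap_sketch *e);
        int (*build_hdr)(char *buf, struct encap_sketch *e);
};

static int encap_build(const struct tunnel_ops_sketch *ops,
                       struct encap_sketch *e, char *buf)
{
        if (!ops || !ops->build_hdr)
                return -1;              /* tunnel type not offloadable */
        return ops->build_hdr(buf, e);
}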
e                 598 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 				   struct mlx5e_encap_entry *e,
e                 602 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
e                 610 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	wait_for_completion(&e->res_ready);
e                 613 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
e                 614 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
e                 615 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 				    ether_addr_equal(e->h_dest, ha)))
e                 618 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5e_take_all_encap_flows(e, &flow_list);
e                 620 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
e                 621 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
e                 622 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5e_tc_encap_flows_del(priv, e, &flow_list);
e                 624 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
e                 625 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		ether_addr_copy(e->h_dest, ha);
e                 630 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
e                 632 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
e                 644 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5e_encap_entry *e;
e                 667 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	list_for_each_entry(e, &nhe->encap_list, encap_list) {
e                 668 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		if (!mlx5e_encap_take(e))
e                 671 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		priv = netdev_priv(e->out_dev);
e                 672 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
e                 673 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5e_encap_put(priv, e);
e                1074 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 					struct mlx5e_encap_entry *e,
e                1084 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
e                1101 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 				 struct mlx5e_encap_entry *e)
e                1109 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
e                1114 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
e                1116 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
e                1120 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 						      e->reformat_type);
e                1125 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	e->nhe = nhe;
e                1127 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	list_add_rcu(&e->encap_list, &nhe->encap_list);
e                1136 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 				  struct mlx5e_encap_entry *e)
e                1142 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (!e->nhe)
e                1145 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	spin_lock(&e->nhe->encap_list_lock);
e                1146 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	list_del_rcu(&e->encap_list);
e                1147 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	spin_unlock(&e->nhe->encap_list_lock);
e                1149 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5e_rep_neigh_entry_release(e->nhe);
e                1150 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	e->nhe = NULL;
e                1151 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
e                 195 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h 				 struct mlx5e_encap_entry *e);
e                 197 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h 				  struct mlx5e_encap_entry *e);
e                 107 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5e_encap_entry *e; /* attached encap instance */
e                1295 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			      struct mlx5e_encap_entry *e,
e                1305 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e                1306 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 						     e->reformat_type,
e                1307 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 						     e->encap_size, e->encap_header,
e                1309 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (IS_ERR(e->pkt_reformat)) {
e                1311 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			       PTR_ERR(e->pkt_reformat));
e                1314 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
e                1326 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
e                1360 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			      struct mlx5e_encap_entry *e,
e                1394 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
e                1395 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
e                1409 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
e                1414 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_for_each_entry(efi, &e->flows, list) {
e                1436 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			   struct mlx5e_encap_entry *e)
e                1444 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	for (next = e ?
e                1446 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 					   &e->encap_list,
e                1463 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (e)
e                1464 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5e_encap_put(netdev_priv(e->out_dev), e);
e                1472 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		e = next;
e                1482 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5e_encap_entry *e = NULL;
e                1502 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
e                1503 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
e                1510 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
e                1531 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5e_encap_put(priv, e);
e                1553 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
e                1555 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	WARN_ON(!list_empty(&e->flows));
e                1557 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (e->compl_result > 0) {
e                1558 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
e                1560 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
e                1561 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
e                1564 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	kfree(e->tun_info);
e                1565 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	kfree(e->encap_header);
e                1566 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	kfree_rcu(e, rcu);
e                1569 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
e                1573 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
e                1575 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_del_rcu(&e->encap_hlist);
e                1578 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5e_encap_dealloc(priv, e);
e                1584 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
e                1588 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!e)
e                1593 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	flow->encaps[out_index].e = NULL;
e                1594 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!refcount_dec_and_test(&e->refcnt)) {
e                1598 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_del_rcu(&e->encap_hlist);
e                1601 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5e_encap_dealloc(priv, e);
e                2956 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
e                2958 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	return refcount_inc_not_zero(&e->refcnt);
e                2966 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5e_encap_entry *e;
e                2969 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
e                2971 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		e_key.ip_tun_key = &e->tun_info->key;
e                2972 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		e_key.tc_tunnel = e->tunnel;
e                2974 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		    mlx5e_encap_take(e))
e                2975 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			return e;
e                3001 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5e_encap_entry *e;
e                3019 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e = mlx5e_encap_get(priv, &key, hash_key);
e                3022 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (e) {
e                3024 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		wait_for_completion(&e->res_ready);
e                3028 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		if (e->compl_result < 0) {
e                3035 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                3036 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!e) {
e                3041 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	refcount_set(&e->refcnt, 1);
e                3042 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	init_completion(&e->res_ready);
e                3049 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->tun_info = tun_info;
e                3050 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
e                3054 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	INIT_LIST_HEAD(&e->flows);
e                3055 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
e                3059 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
e                3061 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
e                3065 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	complete_all(&e->res_ready);
e                3067 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		e->compl_result = err;
e                3070 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->compl_result = 1;
e                3073 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	flow->encaps[out_index].e = e;
e                3074 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_add(&flow->encaps[out_index].list, &e->flows);
e                3076 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	*encap_dev = e->out_dev;
e                3077 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
e                3078 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
e                3090 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (e)
e                3091 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5e_encap_put(priv, e);
e                3097 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	kfree(e);
e                  75 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h 			      struct mlx5e_encap_entry *e,
e                  78 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h 			      struct mlx5e_encap_entry *e,
e                  80 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
e                  81 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
e                  83 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
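The mlx5 en_tc lines above manage encap entries with a take/put discipline: mlx5e_encap_take() succeeds only while the count is still nonzero (refcount_inc_not_zero), and the final put unhashes and frees the entry. A userspace sketch of those semantics using C11 atomics in place of refcount_t; note the kernel version also takes encap_tbl_lock before unhashing, which this sketch elides:

#include <stdatomic.h>
#include <stdbool.h>

struct entry_sketch {
        atomic_uint refcnt;
};

/* like refcount_inc_not_zero(): refuse to resurrect a dying entry */
static bool entry_take(struct entry_sketch *e)
{
        unsigned int old = atomic_load(&e->refcnt);

        while (old != 0)
                if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
                        return true;
        return false;
}

static void entry_put(struct entry_sketch *e,
                      void (*free_fn)(struct entry_sketch *))
{
        if (atomic_fetch_sub(&e->refcnt, 1) == 1)
                free_fn(e);             /* last reference: unhash + free */
}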
e                 943 drivers/net/ethernet/mellanox/mlx5/core/port.c 	MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1);
e                 189 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
e                 190 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
e                 191 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
e                2084 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1);
e                2117 drivers/net/ethernet/mellanox/mlxsw/reg.h static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
e                2122 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_ppbt_e_set(payload, e);
e                4422 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
e                8489 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
e                8614 drivers/net/ethernet/mellanox/mlxsw/reg.h 				       u16 system_port, bool e,
e                8620 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_mpat_e_set(payload, e);
e                9175 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
e                9186 drivers/net/ethernet/mellanox/mlxsw/reg.h static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
e                9191 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_mpsc_e_set(payload, e);
e                4408 drivers/net/ethernet/realtek/r8169_main.c 			    const struct ephy_info *e, int len)
e                4413 drivers/net/ethernet/realtek/r8169_main.c 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
e                4414 drivers/net/ethernet/realtek/r8169_main.c 		rtl_ephy_write(tp, e->offset, w);
e                4415 drivers/net/ethernet/realtek/r8169_main.c 		e++;
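The r8169 lines above are a table-driven read-modify-write: each ephy_info entry names a register, the bits to clear and the bits to set. A tiny sketch with an array standing in for the PHY register file:

#include <stdint.h>

struct ephy_info_sketch { int offset; uint16_t mask; uint16_t bits; };

static uint16_t ephy_reg[32];           /* stand-in register file */

static void ephy_init_sketch(const struct ephy_info_sketch *e, int len)
{
        while (len-- > 0) {
                /* clear the masked bits, then OR in the new ones */
                ephy_reg[e->offset] = (ephy_reg[e->offset] & ~e->mask)
                                      | e->bits;
                e++;
        }
}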
e                3043 drivers/net/ethernet/sun/cassini.c 	unsigned char *e = &cp->dev->dev_addr[0];
e                3091 drivers/net/ethernet/sun/cassini.c 	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
e                3103 drivers/net/ethernet/sun/cassini.c 	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
e                3104 drivers/net/ethernet/sun/cassini.c 	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
e                3105 drivers/net/ethernet/sun/cassini.c 	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
e                 626 drivers/net/ethernet/sun/sunbmac.c 	unsigned char *e = &bp->dev->dev_addr[0];
e                 647 drivers/net/ethernet/sun/sunbmac.c 	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
e                 648 drivers/net/ethernet/sun/sunbmac.c 	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
e                 649 drivers/net/ethernet/sun/sunbmac.c 	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
e                 666 drivers/net/ethernet/sun/sunbmac.c 	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
e                1822 drivers/net/ethernet/sun/sungem.c 	unsigned char *e = &gp->dev->dev_addr[0];
e                1840 drivers/net/ethernet/sun/sungem.c 	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
e                1842 drivers/net/ethernet/sun/sungem.c 	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
e                1843 drivers/net/ethernet/sun/sungem.c 	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
e                1844 drivers/net/ethernet/sun/sungem.c 	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
e                2099 drivers/net/ethernet/sun/sungem.c 		unsigned char *e = &gp->dev->dev_addr[0];
e                2105 drivers/net/ethernet/sun/sungem.c 		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
e                2106 drivers/net/ethernet/sun/sungem.c 		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
e                2107 drivers/net/ethernet/sun/sungem.c 		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
e                2432 drivers/net/ethernet/sun/sungem.c 	unsigned char *e = &dev->dev_addr[0];
e                2447 drivers/net/ethernet/sun/sungem.c 	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
e                2448 drivers/net/ethernet/sun/sungem.c 	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
e                2449 drivers/net/ethernet/sun/sungem.c 	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
e                1429 drivers/net/ethernet/sun/sunhme.c 	unsigned char *e = &hp->dev->dev_addr[0];
e                1515 drivers/net/ethernet/sun/sunhme.c 	hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
e                1517 drivers/net/ethernet/sun/sunhme.c 	hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
e                1518 drivers/net/ethernet/sun/sunhme.c 	hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
e                1519 drivers/net/ethernet/sun/sunhme.c 	hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
e                 147 drivers/net/ethernet/sun/sunqe.c 	unsigned char *e = &qep->dev->dev_addr[0];
e                 206 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
e                 207 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
e                 208 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
e                 209 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
e                 210 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
e                 211 drivers/net/ethernet/sun/sunqe.c 	sbus_writeb(e[5], mregs + MREGS_ETHADDR);
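The Sun driver lines above (cassini, sunbmac, sungem, sunhme) all pack the 6-byte station address into three 16-bit registers, low pair in register 0 and high pair in register 2, and several also reuse the low 10 bits of the address as a random-backoff seed. The packing step in isolation:

#include <stdint.h>

static uint16_t macaddr_reg[3];         /* stand-in for MAC_ADDR0..2 */

static void set_mac_regs(const uint8_t e[6])
{
        macaddr_reg[0] = (e[4] << 8) | e[5];    /* least significant pair */
        macaddr_reg[1] = (e[2] << 8) | e[3];
        macaddr_reg[2] = (e[0] << 8) | e[1];    /* most significant pair */
}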
e                 319 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 			range->freq[chs].e = 6;
e                 592 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c 	iwe.u.freq.e = 0; /* table value in MHz */
e                 101 drivers/net/fddi/skfp/ecm.c 	smc->e.path_test = PT_PASSED ;
e                 102 drivers/net/fddi/skfp/ecm.c 	smc->e.trace_prop = 0 ;
e                 103 drivers/net/fddi/skfp/ecm.c 	smc->e.sb_flag = 0 ;
e                 105 drivers/net/fddi/skfp/ecm.c 	smc->e.ecm_line_state = FALSE ;
e                 157 drivers/net/fddi/skfp/ecm.c 		smc->e.DisconnectFlag = FALSE ;
e                 160 drivers/net/fddi/skfp/ecm.c 		smc->e.DisconnectFlag = TRUE ;
e                 168 drivers/net/fddi/skfp/ecm.c 		smc->e.path_test = PT_PASSED ;
e                 169 drivers/net/fddi/skfp/ecm.c 		smc->e.ecm_line_state = FALSE ;
e                 176 drivers/net/fddi/skfp/ecm.c 			&& smc->e.path_test==PT_PASSED) {
e                 181 drivers/net/fddi/skfp/ecm.c 		else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
e                 190 drivers/net/fddi/skfp/ecm.c 		smc->e.trace_prop = 0 ;
e                 224 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test = PT_EXITING ;
e                 229 drivers/net/fddi/skfp/ecm.c 		else if (smc->e.path_test == PT_PENDING) {
e                 240 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test = PT_PENDING ;
e                 254 drivers/net/fddi/skfp/ecm.c 			(smc->e.path_test != PT_PENDING)) {
e                 260 drivers/net/fddi/skfp/ecm.c 			(smc->e.path_test == PT_PENDING)) {
e                 265 drivers/net/fddi/skfp/ecm.c 		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
e                 271 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test == PT_PENDING) {
e                 272 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test = PT_EXITING ;
e                 280 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test != PT_PENDING) {
e                 287 drivers/net/fddi/skfp/ecm.c 		smc->e.path_test = PT_TESTING ;
e                 295 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test = PT_PASSED ;
e                 297 drivers/net/fddi/skfp/ecm.c 		if (smc->e.path_test == PT_FAILED)
e                 301 drivers/net/fddi/skfp/ecm.c 		if (smc->e.path_test == PT_FAILED &&
e                 313 drivers/net/fddi/skfp/ecm.c 		else if (smc->e.path_test == PT_PASSED) {
e                 318 drivers/net/fddi/skfp/ecm.c 		else if (smc->e.path_test == PT_FAILED &&
e                 353 drivers/net/fddi/skfp/ecm.c 		smc->e.ecm_line_state = TRUE ;	/* flag to pcm: report Q/HLS */
e                 363 drivers/net/fddi/skfp/ecm.c 			smc->e.sb_flag = FALSE ;
e                 364 drivers/net/fddi/skfp/ecm.c 			smc->e.ecm_line_state = FALSE ;
e                 369 drivers/net/fddi/skfp/ecm.c 		else if (!smc->e.sb_flag &&
e                 372 drivers/net/fddi/skfp/ecm.c 			smc->e.sb_flag = TRUE ;
e                 380 drivers/net/fddi/skfp/ecm.c 			smc->e.ecm_line_state = FALSE ;
e                 403 drivers/net/fddi/skfp/ecm.c 		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
e                 437 drivers/net/fddi/skfp/ecm.c 	DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop);
e                 440 drivers/net/fddi/skfp/ecm.c 	if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
e                 445 drivers/net/fddi/skfp/ecm.c 	else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) &&
e                 451 drivers/net/fddi/skfp/ecm.c 	else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) &&
e                 460 drivers/net/fddi/skfp/ecm.c 		smc->e.path_test = PT_PENDING ;
e                 462 drivers/net/fddi/skfp/ecm.c 	smc->e.trace_prop = 0 ;
e                 475 drivers/net/fddi/skfp/ecm.c 	while (smc->e.trace_prop) {
e                 477 drivers/net/fddi/skfp/ecm.c 		       smc->e.trace_prop);
e                 479 drivers/net/fddi/skfp/ecm.c 		if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
e                 481 drivers/net/fddi/skfp/ecm.c 			smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ;
e                 486 drivers/net/fddi/skfp/ecm.c 				if (smc->e.trace_prop &
e                 491 drivers/net/fddi/skfp/ecm.c 			smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_PHY(p)) ;
e                 498 drivers/net/fddi/skfp/ecm.c 			smc->e.path_test = PT_PENDING ;
e                 516 drivers/net/fddi/skfp/ecm.c 	smt_timer_start(smc,&smc->e.ecm_timer,value,EV_TOKEN(EVENT_ECM,event));
e                 525 drivers/net/fddi/skfp/ecm.c 	if (smc->e.ecm_timer.tm_active)
e                 526 drivers/net/fddi/skfp/ecm.c 		smt_timer_stop(smc,&smc->e.ecm_timer) ;
e                 440 drivers/net/fddi/skfp/h/smc.h 	struct s_ecm	e ;		/* ecm */
e                1768 drivers/net/fddi/skfp/pcmplc.c 		DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag);
e                1769 drivers/net/fddi/skfp/pcmplc.c 		if (smc->e.DisconnectFlag == FALSE) {
e                1799 drivers/net/fddi/skfp/pcmplc.c 			smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
e                1809 drivers/net/fddi/skfp/pcmplc.c 		if (smc->e.path_test == PT_PASSED) {
e                1814 drivers/net/fddi/skfp/pcmplc.c 			smc->e.path_test = PT_PENDING ;
e                  43 drivers/net/fddi/skfp/pmf.c #define MOFFSS(e)	offsetof(struct fddi_mib, e)
e                  44 drivers/net/fddi/skfp/pmf.c #define MOFFMS(e)	offsetof(struct fddi_mib_m, e)
e                  45 drivers/net/fddi/skfp/pmf.c #define MOFFAS(e)	offsetof(struct fddi_mib_a, e)
e                  46 drivers/net/fddi/skfp/pmf.c #define MOFFPS(e)	offsetof(struct fddi_mib_p, e)
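The pmf.c macros above build attribute tables out of offsetof(): each MIB attribute records where its field lives inside the MIB struct, so one generic accessor can serve them all. A minimal sketch of the idiom:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct mib_sketch { uint32_t a; uint32_t b; };

#define MOFF(f) offsetof(struct mib_sketch, f)

static const size_t attr_off[] = { MOFF(a), MOFF(b) };

static uint32_t mib_get(const struct mib_sketch *m, int attr)
{
        uint32_t v;

        /* memcpy avoids alignment assumptions about the stored field */
        memcpy(&v, (const char *)m + attr_off[attr], sizeof(v));
        return v;
}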
e                 508 drivers/net/fddi/skfp/rmt.c 		smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
e                1806 drivers/net/fddi/skfp/smt.c static int entity_to_index(struct s_smc *smc, int e)
e                1808 drivers/net/fddi/skfp/smt.c 	if (e == ENTITY_MAC)
e                1811 drivers/net/fddi/skfp/smt.c 		return phy_index(smc, e - ENTITY_PHY(0));
e                 114 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 120 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &current_phy->edges, list) {
e                 126 drivers/net/ieee802154/mac802154_hwsim.c 		if (e->endpoint->suspended)
e                 129 drivers/net/ieee802154/mac802154_hwsim.c 		endpoint_pib = rcu_dereference(e->endpoint->pib);
e                 134 drivers/net/ieee802154/mac802154_hwsim.c 			einfo = rcu_dereference(e->info);
e                 136 drivers/net/ieee802154/mac802154_hwsim.c 				ieee802154_rx_irqsafe(e->endpoint->hw, newskb,
e                 209 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 229 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &phy->edges, list) {
e                 239 drivers/net/ieee802154/mac802154_hwsim.c 				  e->endpoint->idx);
e                 247 drivers/net/ieee802154/mac802154_hwsim.c 		einfo = rcu_dereference(e->info);
e                 383 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 385 drivers/net/ieee802154/mac802154_hwsim.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                 386 drivers/net/ieee802154/mac802154_hwsim.c 	if (!e)
e                 391 drivers/net/ieee802154/mac802154_hwsim.c 		kfree(e);
e                 396 drivers/net/ieee802154/mac802154_hwsim.c 	rcu_assign_pointer(e->info, einfo);
e                 397 drivers/net/ieee802154/mac802154_hwsim.c 	e->endpoint = endpoint;
e                 399 drivers/net/ieee802154/mac802154_hwsim.c 	return e;
e                 402 drivers/net/ieee802154/mac802154_hwsim.c static void hwsim_free_edge(struct hwsim_edge *e)
e                 407 drivers/net/ieee802154/mac802154_hwsim.c 	einfo = rcu_dereference(e->info);
e                 411 drivers/net/ieee802154/mac802154_hwsim.c 	kfree_rcu(e, rcu);
e                 418 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 451 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &phy_v0->edges, list) {
e                 452 drivers/net/ieee802154/mac802154_hwsim.c 		if (e->endpoint->idx == v1) {
e                 460 drivers/net/ieee802154/mac802154_hwsim.c 	e = hwsim_alloc_edge(phy_v1, 0xff);
e                 461 drivers/net/ieee802154/mac802154_hwsim.c 	if (!e) {
e                 465 drivers/net/ieee802154/mac802154_hwsim.c 	list_add_rcu(&e->list, &phy_v0->edges);
e                 480 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 504 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &phy_v0->edges, list) {
e                 505 drivers/net/ieee802154/mac802154_hwsim.c 		if (e->endpoint->idx == v1) {
e                 507 drivers/net/ieee802154/mac802154_hwsim.c 			list_del_rcu(&e->list);
e                 508 drivers/net/ieee802154/mac802154_hwsim.c 			hwsim_free_edge(e);
e                 527 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 560 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &phy_v0->edges, list) {
e                 561 drivers/net/ieee802154/mac802154_hwsim.c 		if (e->endpoint->idx == v1) {
e                 563 drivers/net/ieee802154/mac802154_hwsim.c 			rcu_assign_pointer(e->info, einfo);
e                 678 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 683 drivers/net/ieee802154/mac802154_hwsim.c 		list_for_each_entry_rcu(e, &tmp->edges, list) {
e                 684 drivers/net/ieee802154/mac802154_hwsim.c 			if (e->endpoint->idx == phy->idx) {
e                 685 drivers/net/ieee802154/mac802154_hwsim.c 				list_del_rcu(&e->list);
e                 686 drivers/net/ieee802154/mac802154_hwsim.c 				hwsim_free_edge(e);
e                 698 drivers/net/ieee802154/mac802154_hwsim.c 	struct hwsim_edge *e;
e                 701 drivers/net/ieee802154/mac802154_hwsim.c 		e = hwsim_alloc_edge(sub, 0xff);
e                 702 drivers/net/ieee802154/mac802154_hwsim.c 		if (!e)
e                 705 drivers/net/ieee802154/mac802154_hwsim.c 		list_add_rcu(&e->list, &phy->edges);
e                 709 drivers/net/ieee802154/mac802154_hwsim.c 		e = hwsim_alloc_edge(phy, 0xff);
e                 710 drivers/net/ieee802154/mac802154_hwsim.c 		if (!e)
e                 713 drivers/net/ieee802154/mac802154_hwsim.c 		list_add_rcu(&e->list, &sub->edges);
e                 720 drivers/net/ieee802154/mac802154_hwsim.c 	list_for_each_entry_rcu(e, &phy->edges, list) {
e                 721 drivers/net/ieee802154/mac802154_hwsim.c 		list_del_rcu(&e->list);
e                 722 drivers/net/ieee802154/mac802154_hwsim.c 		hwsim_free_edge(e);
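The mac802154_hwsim lines above keep the edge lists RCU-safe: readers walk them with list_for_each_entry_rcu(), writers unlink with list_del_rcu() and defer the free with kfree_rcu(). A kernel-style sketch of that lifecycle (not a standalone program, it only compiles in kernel context, and the struct is a trimmed stand-in):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct edge_sketch {
        struct list_head list;
        int idx;
        struct rcu_head rcu;
};

/* caller must hold rcu_read_lock(); the result is only valid inside it */
static struct edge_sketch *edge_lookup(struct list_head *edges, int idx)
{
        struct edge_sketch *e;

        list_for_each_entry_rcu(e, edges, list)
                if (e->idx == idx)
                        return e;
        return NULL;
}

static void edge_remove(struct edge_sketch *e)
{
        list_del_rcu(&e->list);         /* unlink; readers may still see it */
        kfree_rcu(e, rcu);              /* free after a grace period */
}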
e                 328 drivers/net/netdevsim/bpf.c nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
e                 330 drivers/net/netdevsim/bpf.c 	return e->key && !memcmp(key, e->key, map->key_size);
e                 410 drivers/net/tun.c 	struct tun_flow_entry *e;
e                 412 drivers/net/tun.c 	hlist_for_each_entry_rcu(e, head, hash_link) {
e                 413 drivers/net/tun.c 		if (e->rxhash == rxhash)
e                 414 drivers/net/tun.c 			return e;
e                 423 drivers/net/tun.c 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
e                 425 drivers/net/tun.c 	if (e) {
e                 428 drivers/net/tun.c 		e->updated = jiffies;
e                 429 drivers/net/tun.c 		e->rxhash = rxhash;
e                 430 drivers/net/tun.c 		e->rps_rxhash = 0;
e                 431 drivers/net/tun.c 		e->queue_index = queue_index;
e                 432 drivers/net/tun.c 		e->tun = tun;
e                 433 drivers/net/tun.c 		hlist_add_head_rcu(&e->hash_link, head);
e                 436 drivers/net/tun.c 	return e;
e                 439 drivers/net/tun.c static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
e                 442 drivers/net/tun.c 		  e->rxhash, e->queue_index);
e                 443 drivers/net/tun.c 	hlist_del_rcu(&e->hash_link);
e                 444 drivers/net/tun.c 	kfree_rcu(e, rcu);
e                 454 drivers/net/tun.c 		struct tun_flow_entry *e;
e                 457 drivers/net/tun.c 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
e                 458 drivers/net/tun.c 			tun_flow_delete(tun, e);
e                 469 drivers/net/tun.c 		struct tun_flow_entry *e;
e                 472 drivers/net/tun.c 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
e                 473 drivers/net/tun.c 			if (e->queue_index == queue_index)
e                 474 drivers/net/tun.c 				tun_flow_delete(tun, e);
e                 492 drivers/net/tun.c 		struct tun_flow_entry *e;
e                 495 drivers/net/tun.c 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
e                 498 drivers/net/tun.c 			this_timer = e->updated + delay;
e                 500 drivers/net/tun.c 				tun_flow_delete(tun, e);
e                 518 drivers/net/tun.c 	struct tun_flow_entry *e;
e                 526 drivers/net/tun.c 	e = tun_flow_find(head, rxhash);
e                 527 drivers/net/tun.c 	if (likely(e)) {
e                 529 drivers/net/tun.c 		if (READ_ONCE(e->queue_index) != queue_index)
e                 530 drivers/net/tun.c 			WRITE_ONCE(e->queue_index, queue_index);
e                 531 drivers/net/tun.c 		if (e->updated != jiffies)
e                 532 drivers/net/tun.c 			e->updated = jiffies;
e                 533 drivers/net/tun.c 		sock_rps_record_flow_hash(e->rps_rxhash);
e                 553 drivers/net/tun.c static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
e                 555 drivers/net/tun.c 	if (unlikely(e->rps_rxhash != hash))
e                 556 drivers/net/tun.c 		e->rps_rxhash = hash;
e                 567 drivers/net/tun.c 	struct tun_flow_entry *e;
e                 574 drivers/net/tun.c 	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
e                 575 drivers/net/tun.c 	if (e) {
e                 576 drivers/net/tun.c 		tun_flow_save_rps_rxhash(e, txq);
e                 577 drivers/net/tun.c 		txq = e->queue_index;
e                1038 drivers/net/tun.c 		struct tun_flow_entry *e;
e                1042 drivers/net/tun.c 		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
e                1043 drivers/net/tun.c 		if (e)
e                1044 drivers/net/tun.c 			tun_flow_save_rps_rxhash(e, rxhash);
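The tun.c lines above implement per-flow queue steering with a small hash table: tun_flow_find() hashes the receive hash into a bucket and scans the chain for an exact rxhash match. A userspace sketch using a plain singly linked chain instead of the kernel's RCU hlist:

#include <stdint.h>
#include <stddef.h>

#define FLOW_BUCKETS 1024

struct flow_sketch {
        struct flow_sketch *next;
        uint32_t rxhash;
        uint16_t queue_index;
};

static struct flow_sketch *flow_tab[FLOW_BUCKETS];

static struct flow_sketch *flow_find(uint32_t rxhash)
{
        struct flow_sketch *e;

        for (e = flow_tab[rxhash % FLOW_BUCKETS]; e; e = e->next)
                if (e->rxhash == rxhash)
                        return e;       /* steer to e->queue_index */
        return NULL;
}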
e                 680 drivers/net/usb/cdc_ncm.c 	struct usb_host_endpoint *e, *in = NULL, *out = NULL;
e                 684 drivers/net/usb/cdc_ncm.c 		e = intf->cur_altsetting->endpoint + ep;
e                 687 drivers/net/usb/cdc_ncm.c 		if (!usb_endpoint_maxp(&e->desc))
e                 690 drivers/net/usb/cdc_ncm.c 		switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
e                 692 drivers/net/usb/cdc_ncm.c 			if (usb_endpoint_dir_in(&e->desc)) {
e                 694 drivers/net/usb/cdc_ncm.c 					dev->status = e;
e                 699 drivers/net/usb/cdc_ncm.c 			if (usb_endpoint_dir_in(&e->desc)) {
e                 701 drivers/net/usb/cdc_ncm.c 					in = e;
e                 704 drivers/net/usb/cdc_ncm.c 					out = e;
e                2888 drivers/net/usb/lan78xx.c 			struct usb_host_endpoint *e;
e                2891 drivers/net/usb/lan78xx.c 			e = alt->endpoint + ep;
e                2892 drivers/net/usb/lan78xx.c 			switch (e->desc.bmAttributes) {
e                2894 drivers/net/usb/lan78xx.c 				if (!usb_endpoint_dir_in(&e->desc))
e                2903 drivers/net/usb/lan78xx.c 			if (usb_endpoint_dir_in(&e->desc)) {
e                2905 drivers/net/usb/lan78xx.c 					in = e;
e                2907 drivers/net/usb/lan78xx.c 					status = e;
e                2910 drivers/net/usb/lan78xx.c 					out = e;
e                  99 drivers/net/usb/usbnet.c 			struct usb_host_endpoint	*e;
e                 102 drivers/net/usb/usbnet.c 			e = alt->endpoint + ep;
e                 105 drivers/net/usb/usbnet.c 			if (!usb_endpoint_maxp(&e->desc))
e                 108 drivers/net/usb/usbnet.c 			switch (e->desc.bmAttributes) {
e                 110 drivers/net/usb/usbnet.c 				if (!usb_endpoint_dir_in(&e->desc))
e                 119 drivers/net/usb/usbnet.c 			if (usb_endpoint_dir_in(&e->desc)) {
e                 121 drivers/net/usb/usbnet.c 					in = e;
e                 123 drivers/net/usb/usbnet.c 					status = e;
e                 126 drivers/net/usb/usbnet.c 					out = e;
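cdc_ncm.c, lan78xx.c and usbnet.c above run the same probe loop: walk the current altsetting's endpoint array, skip endpoints with a zero max packet size, and classify the rest by transfer type and direction to pick the bulk-IN, bulk-OUT and interrupt status endpoints. A hedged userspace sketch of that classification step; the descriptor fields and constants mirror the USB spec, everything else is illustrative (the kernel's usb_endpoint_dir_in() is just the direction-bit test below).

    #include <stdint.h>
    #include <stdio.h>

    #define USB_ENDPOINT_XFERTYPE_MASK 0x03
    #define USB_ENDPOINT_XFER_BULK     0x02
    #define USB_ENDPOINT_XFER_INT      0x03
    #define USB_DIR_IN                 0x80

    struct endpoint_desc {
        uint8_t  bEndpointAddress;
        uint8_t  bmAttributes;
        uint16_t wMaxPacketSize;
    };

    static int ep_dir_in(const struct endpoint_desc *d)
    {
        return d->bEndpointAddress & USB_DIR_IN;
    }

    static void classify(const struct endpoint_desc *eps, int n)
    {
        const struct endpoint_desc *in = NULL, *out = NULL, *status = NULL;

        for (int i = 0; i < n; i++) {
            const struct endpoint_desc *e = &eps[i];

            if (!e->wMaxPacketSize)   /* endpoint can't carry payload */
                continue;

            switch (e->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
            case USB_ENDPOINT_XFER_INT:
                if (ep_dir_in(e))
                    status = e;       /* interrupt-IN: link status */
                break;
            case USB_ENDPOINT_XFER_BULK:
                if (ep_dir_in(e))
                    in = e;           /* bulk-IN: rx data */
                else
                    out = e;          /* bulk-OUT: tx data */
                break;
            }
        }
        printf("in=%p out=%p status=%p\n",
               (void *)in, (void *)out, (void *)status);
    }

    int main(void)
    {
        const struct endpoint_desc eps[] = {
            { 0x81, USB_ENDPOINT_XFER_INT,  8   },
            { 0x82, USB_ENDPOINT_XFER_BULK, 512 },
            { 0x01, USB_ENDPOINT_XFER_BULK, 512 },
        };

        classify(eps, 3);
        return 0;
    }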
e                 555 drivers/net/wimax/i2400m/rx.c 				struct i2400m_roq_log_entry *e)
e                 559 drivers/net/wimax/i2400m/rx.c 	switch(e->type) {
e                 563 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
e                 567 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn);
e                 572 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
e                 577 drivers/net/wimax/i2400m/rx.c 			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
e                 581 drivers/net/wimax/i2400m/rx.c 			index, e_index, e->type);
e                 593 drivers/net/wimax/i2400m/rx.c 	struct i2400m_roq_log_entry *e;
e                 601 drivers/net/wimax/i2400m/rx.c 	e = &roq->log->entry[cnt_idx];
e                 603 drivers/net/wimax/i2400m/rx.c 	e->type = type;
e                 604 drivers/net/wimax/i2400m/rx.c 	e->ws = ws;
e                 605 drivers/net/wimax/i2400m/rx.c 	e->count = count;
e                 606 drivers/net/wimax/i2400m/rx.c 	e->sn = sn;
e                 607 drivers/net/wimax/i2400m/rx.c 	e->nsn = nsn;
e                 608 drivers/net/wimax/i2400m/rx.c 	e->new_ws = new_ws;
e                 611 drivers/net/wimax/i2400m/rx.c 		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
e                 620 drivers/net/wimax/i2400m/rx.c 	struct i2400m_roq_log_entry *e;
e                 626 drivers/net/wimax/i2400m/rx.c 		e = &roq->log->entry[cnt_idx];
e                 627 drivers/net/wimax/i2400m/rx.c 		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
e                 628 drivers/net/wimax/i2400m/rx.c 		memset(e, 0, sizeof(*e));
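The i2400m rx.c entries show a fixed-size circular event log: the add path overwrites the slot at the current index with the reorder-queue event (type, window, sequence numbers), and the dump path prints each slot and clears it. A reduced sketch of that ring shape, with the entry fields abbreviated:

    #include <stdio.h>
    #include <string.h>

    #define LOG_SIZE 8

    struct log_entry { int type; unsigned sn, nsn; };

    static struct log_entry ring[LOG_SIZE];
    static unsigned head;   /* next slot to overwrite */

    static void log_add(int type, unsigned sn, unsigned nsn)
    {
        struct log_entry *e = &ring[head++ % LOG_SIZE];

        e->type = type;
        e->sn = sn;
        e->nsn = nsn;
    }

    /* Dump oldest-to-newest, clearing each slot as we go. */
    static void log_dump(void)
    {
        for (unsigned i = 0; i < LOG_SIZE; i++) {
            struct log_entry *e = &ring[(head + i) % LOG_SIZE];

            if (e->type)
                printf("type %d sn %u nsn %u\n", e->type, e->sn, e->nsn);
            memset(e, 0, sizeof(*e));
        }
    }

    int main(void)
    {
        log_add(1, 5, 6);
        log_add(2, 6, 7);
        log_dump();
        return 0;
    }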
e                 126 drivers/net/wireless/ath/ath9k/common-debug.c #define RXS_ERR(s, e)					\
e                 130 drivers/net/wireless/ath/ath9k/common-debug.c 				 rxstats->e);		\
e                  62 drivers/net/wireless/ath/dfs_pri_detector.h 	     (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
e                2252 drivers/net/wireless/atmel/atmel.c 	if (fwrq->e == 1) {
e                2256 drivers/net/wireless/atmel/atmel.c 		fwrq->e = 0;
e                2260 drivers/net/wireless/atmel/atmel.c 	if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
e                2281 drivers/net/wireless/atmel/atmel.c 	fwrq->e = 0;
e                2361 drivers/net/wireless/atmel/atmel.c 		iwe.u.freq.e = 0;
e                2420 drivers/net/wireless/atmel/atmel.c 			range->freq[k++].e = 1;
e                 648 drivers/net/wireless/broadcom/b43/debugfs.c 	struct b43_dfsentry *e = dev->dfsentry;
e                 652 drivers/net/wireless/broadcom/b43/debugfs.c 		debugfs_remove(e->dyn_debug_dentries[i]);
e                 657 drivers/net/wireless/broadcom/b43/debugfs.c 	struct b43_dfsentry *e = dev->dfsentry;
e                 660 drivers/net/wireless/broadcom/b43/debugfs.c 	e->dyn_debug[id] = (initstate);				\
e                 661 drivers/net/wireless/broadcom/b43/debugfs.c 	e->dyn_debug_dentries[id] =				\
e                 662 drivers/net/wireless/broadcom/b43/debugfs.c 		debugfs_create_bool(name, 0600, e->subdir,	\
e                 663 drivers/net/wireless/broadcom/b43/debugfs.c 				&(e->dyn_debug[id]));		\
e                 681 drivers/net/wireless/broadcom/b43/debugfs.c 	struct b43_dfsentry *e;
e                 686 drivers/net/wireless/broadcom/b43/debugfs.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                 687 drivers/net/wireless/broadcom/b43/debugfs.c 	if (!e) {
e                 691 drivers/net/wireless/broadcom/b43/debugfs.c 	e->dev = dev;
e                 692 drivers/net/wireless/broadcom/b43/debugfs.c 	log = &e->txstatlog;
e                 697 drivers/net/wireless/broadcom/b43/debugfs.c 		kfree(e);
e                 702 drivers/net/wireless/broadcom/b43/debugfs.c 	dev->dfsentry = e;
e                 705 drivers/net/wireless/broadcom/b43/debugfs.c 	e->subdir = debugfs_create_dir(devdir, rootdir);
e                 707 drivers/net/wireless/broadcom/b43/debugfs.c 	e->mmio16read_next = 0xFFFF; /* invalid address */
e                 708 drivers/net/wireless/broadcom/b43/debugfs.c 	e->mmio32read_next = 0xFFFF; /* invalid address */
e                 709 drivers/net/wireless/broadcom/b43/debugfs.c 	e->shm16read_routing_next = 0xFFFFFFFF; /* invalid routing */
e                 710 drivers/net/wireless/broadcom/b43/debugfs.c 	e->shm16read_addr_next = 0xFFFFFFFF; /* invalid address */
e                 711 drivers/net/wireless/broadcom/b43/debugfs.c 	e->shm32read_routing_next = 0xFFFFFFFF; /* invalid routing */
e                 712 drivers/net/wireless/broadcom/b43/debugfs.c 	e->shm32read_addr_next = 0xFFFFFFFF; /* invalid address */
e                 716 drivers/net/wireless/broadcom/b43/debugfs.c 		e->file_##name.dentry =				\
e                 718 drivers/net/wireless/broadcom/b43/debugfs.c 					mode, e->subdir, dev,	\
e                 742 drivers/net/wireless/broadcom/b43/debugfs.c 	struct b43_dfsentry *e;
e                 746 drivers/net/wireless/broadcom/b43/debugfs.c 	e = dev->dfsentry;
e                 747 drivers/net/wireless/broadcom/b43/debugfs.c 	if (!e)
e                 751 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_shm16read.dentry);
e                 752 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_shm16write.dentry);
e                 753 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_shm32read.dentry);
e                 754 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_shm32write.dentry);
e                 755 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_mmio16read.dentry);
e                 756 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_mmio16write.dentry);
e                 757 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_mmio32read.dentry);
e                 758 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_mmio32write.dentry);
e                 759 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_txstat.dentry);
e                 760 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_restart.dentry);
e                 761 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->file_loctls.dentry);
e                 763 drivers/net/wireless/broadcom/b43/debugfs.c 	debugfs_remove(e->subdir);
e                 764 drivers/net/wireless/broadcom/b43/debugfs.c 	kfree(e->txstatlog.log);
e                 765 drivers/net/wireless/broadcom/b43/debugfs.c 	kfree(e);
e                 771 drivers/net/wireless/broadcom/b43/debugfs.c 	struct b43_dfsentry *e = dev->dfsentry;
e                 776 drivers/net/wireless/broadcom/b43/debugfs.c 	if (!e)
e                 778 drivers/net/wireless/broadcom/b43/debugfs.c 	log = &e->txstatlog;
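The b43 and b43legacy debugfs entries follow the standard lifecycle: allocate a per-device dfsentry, create a directory plus files underneath it, and remove everything in reverse order on teardown (each file's dentry, then the subdir, then the allocation). A hedged kernel-style sketch of that lifecycle, using debugfs_remove_recursive() instead of per-file removal; the struct and function names are illustrative, not the driver's:

    #include <linux/debugfs.h>
    #include <linux/slab.h>

    struct my_dfsentry {
        struct dentry *subdir;
        bool dyn_debug;
    };

    static struct my_dfsentry *my_debugfs_add(struct dentry *root,
                                              const char *name)
    {
        struct my_dfsentry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
            return NULL;
        e->subdir = debugfs_create_dir(name, root);
        /* one boolean knob under the device directory */
        debugfs_create_bool("dyn_debug", 0600, e->subdir, &e->dyn_debug);
        return e;
    }

    static void my_debugfs_remove(struct my_dfsentry *e)
    {
        if (!e)
            return;
        debugfs_remove_recursive(e->subdir); /* drops the bool file too */
        kfree(e);
    }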
e                  36 drivers/net/wireless/broadcom/b43/phy_ht.c 			const struct b43_phy_ht_channeltab_e_radio2059 *e)
e                  42 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x16, e->radio_syn16);
e                  43 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x17, e->radio_syn17);
e                  44 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x22, e->radio_syn22);
e                  45 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x25, e->radio_syn25);
e                  46 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x27, e->radio_syn27);
e                  47 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x28, e->radio_syn28);
e                  48 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x29, e->radio_syn29);
e                  49 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x2c, e->radio_syn2c);
e                  50 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x2d, e->radio_syn2d);
e                  51 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x37, e->radio_syn37);
e                  52 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x41, e->radio_syn41);
e                  53 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x43, e->radio_syn43);
e                  54 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_radio_write(dev, 0x47, e->radio_syn47);
e                  58 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x4a, e->radio_rxtx4a);
e                  59 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x58, e->radio_rxtx58);
e                  60 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x5a, e->radio_rxtx5a);
e                  61 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x6a, e->radio_rxtx6a);
e                  62 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x6d, e->radio_rxtx6d);
e                  63 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x6e, e->radio_rxtx6e);
e                  64 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x92, e->radio_rxtx92);
e                  65 drivers/net/wireless/broadcom/b43/phy_ht.c 		b43_radio_write(dev, r | 0x98, e->radio_rxtx98);
e                 764 drivers/net/wireless/broadcom/b43/phy_ht.c 				const struct b43_phy_ht_channeltab_e_phy *e,
e                 782 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW1, e->bw1);
e                 783 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW2, e->bw2);
e                 784 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW3, e->bw3);
e                 785 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW4, e->bw4);
e                 786 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5);
e                 787 drivers/net/wireless/broadcom/b43/phy_ht.c 	b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6);
e                 653 drivers/net/wireless/broadcom/b43/phy_lp.c 	const struct lpphy_stx_table_entry *e;
e                 658 drivers/net/wireless/broadcom/b43/phy_lp.c 		e = &lpphy_stx_table[i];
e                 659 drivers/net/wireless/broadcom/b43/phy_lp.c 		tmp = b43_radio_read(dev, e->rf_addr);
e                 660 drivers/net/wireless/broadcom/b43/phy_lp.c 		tmp >>= e->rf_shift;
e                 661 drivers/net/wireless/broadcom/b43/phy_lp.c 		tmp <<= e->phy_shift;
e                 662 drivers/net/wireless/broadcom/b43/phy_lp.c 		b43_phy_maskset(dev, B43_PHY_OFDM(0xF2 + e->phy_offset),
e                 663 drivers/net/wireless/broadcom/b43/phy_lp.c 				~(e->mask << e->phy_shift), tmp);
e                 155 drivers/net/wireless/broadcom/b43/phy_n.c 	const struct nphy_rf_control_override_rev7 *e;
e                 170 drivers/net/wireless/broadcom/b43/phy_n.c 	e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
e                 179 drivers/net/wireless/broadcom/b43/phy_n.c 		if (e)
e                 180 drivers/net/wireless/broadcom/b43/phy_n.c 			val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
e                 184 drivers/net/wireless/broadcom/b43/phy_n.c 			if (e) /* Do it safer, better than wl */
e                 185 drivers/net/wireless/broadcom/b43/phy_n.c 				b43_phy_mask(dev, val_addr, ~e->val_mask);
e                 189 drivers/net/wireless/broadcom/b43/phy_n.c 				if (e)
e                 190 drivers/net/wireless/broadcom/b43/phy_n.c 					b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
e                1046 drivers/net/wireless/broadcom/b43/phy_n.c 				const struct b43_nphy_channeltab_entry_rev3 *e)
e                1048 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1);
e                1049 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2);
e                1050 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv);
e                1051 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2);
e                1052 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1);
e                1054 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_pll_loopfilter1);
e                1056 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_pll_loopfilter2);
e                1058 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_pll_loopfilter3);
e                1060 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_pll_loopfilter4);
e                1062 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_pll_loopfilter5);
e                1064 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_reserved_addr27);
e                1066 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_reserved_addr28);
e                1068 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_reserved_addr29);
e                1070 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_syn_logen_vcobuf1);
e                1071 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2);
e                1072 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3);
e                1073 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4);
e                1076 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_rx0_lnaa_tune);
e                1078 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_rx0_lnag_tune);
e                1081 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_intpaa_boost_tune);
e                1083 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_intpag_boost_tune);
e                1085 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_pada_boost_tune);
e                1087 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_padg_boost_tune);
e                1089 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_pgaa_boost_tune);
e                1091 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_pgag_boost_tune);
e                1093 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_mixa_boost_tune);
e                1095 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx0_mixg_boost_tune);
e                1098 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_rx1_lnaa_tune);
e                1100 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_rx1_lnag_tune);
e                1103 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_intpaa_boost_tune);
e                1105 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_intpag_boost_tune);
e                1107 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_pada_boost_tune);
e                1109 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_padg_boost_tune);
e                1111 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_pgaa_boost_tune);
e                1113 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_pgag_boost_tune);
e                1115 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_mixa_boost_tune);
e                1117 drivers/net/wireless/broadcom/b43/phy_n.c 					e->radio_tx1_mixg_boost_tune);
e                1122 drivers/net/wireless/broadcom/b43/phy_n.c 				const struct b43_nphy_channeltab_entry_rev3 *e)
e                1142 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_chantab_radio_2056_upload(dev, e);
e                1373 drivers/net/wireless/broadcom/b43/phy_n.c 				const struct b43_nphy_channeltab_entry_rev2 *e)
e                1375 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
e                1376 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
e                1377 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
e                1378 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
e                1381 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
e                1382 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
e                1383 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
e                1384 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
e                1387 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
e                1388 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
e                1389 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
e                1390 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
e                1393 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
e                1394 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
e                1395 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
e                1396 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
e                1399 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
e                1400 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
e                1401 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
e                1402 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
e                1405 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
e                1406 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
e                1411 drivers/net/wireless/broadcom/b43/phy_n.c 				const struct b43_nphy_channeltab_entry_rev2 *e)
e                1415 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_chantab_radio_upload(dev, e);
e                2496 drivers/net/wireless/broadcom/b43/phy_n.c 	struct nphy_gain_ctl_workaround_entry *e;
e                2505 drivers/net/wireless/broadcom/b43/phy_n.c 	e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
e                2536 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
e                2537 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
e                2538 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
e                2539 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
e                2540 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
e                2541 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
e                2542 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
e                2543 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
e                2549 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C1_INITGAIN_A, e->init_gain);
e                2550 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C2_INITGAIN_A, e->init_gain);
e                2553 drivers/net/wireless/broadcom/b43/phy_n.c 				e->rfseq_init);
e                2555 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_HIGAIN_A, e->cliphi_gain);
e                2556 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_HIGAIN_A, e->cliphi_gain);
e                2557 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_MEDGAIN_A, e->clipmd_gain);
e                2558 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_MEDGAIN_A, e->clipmd_gain);
e                2559 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C1_CLIP_LOGAIN_A, e->cliplo_gain);
e                2560 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_REV3_C2_CLIP_LOGAIN_A, e->cliplo_gain);
e                2562 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_maskset(dev, B43_NPHY_CRSMINPOWER0, 0xFF00, e->crsmin);
e                2563 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_maskset(dev, B43_NPHY_CRSMINPOWERL0, 0xFF00, e->crsminl);
e                2564 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_maskset(dev, B43_NPHY_CRSMINPOWERU0, 0xFF00, e->crsminu);
e                2565 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
e                2566 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
e                2568 drivers/net/wireless/broadcom/b43/phy_n.c 			~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
e                2570 drivers/net/wireless/broadcom/b43/phy_n.c 			~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
e                6239 drivers/net/wireless/broadcom/b43/phy_n.c 				   const struct b43_phy_n_sfo_cfg *e)
e                6241 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
e                6242 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
e                6243 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
e                6244 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
e                6245 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
e                6246 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
e                6270 drivers/net/wireless/broadcom/b43/phy_n.c 				const struct b43_phy_n_sfo_cfg *e,
e                6299 drivers/net/wireless/broadcom/b43/phy_n.c 	b43_chantab_phy_upload(dev, e);
e                1289 drivers/net/wireless/broadcom/b43/radio_2055.c 	const struct b2055_inittab_entry *e;
e                1294 drivers/net/wireless/broadcom/b43/radio_2055.c 		e = &(b2055_inittab[i]);
e                1295 drivers/net/wireless/broadcom/b43/radio_2055.c 		if (!(e->flags & B2055_INITTAB_ENTRY_OK))
e                1297 drivers/net/wireless/broadcom/b43/radio_2055.c 		if ((e->flags & B2055_INITTAB_UPLOAD) || ignore_uploadflag) {
e                1299 drivers/net/wireless/broadcom/b43/radio_2055.c 				value = e->ghz5;
e                1301 drivers/net/wireless/broadcom/b43/radio_2055.c 				value = e->ghz2;
e                1312 drivers/net/wireless/broadcom/b43/radio_2055.c 	const struct b43_nphy_channeltab_entry_rev2 *e;
e                1316 drivers/net/wireless/broadcom/b43/radio_2055.c 		e = &(b43_nphy_channeltab_rev2[i]);
e                1317 drivers/net/wireless/broadcom/b43/radio_2055.c 		if (e->channel == channel)
e                1318 drivers/net/wireless/broadcom/b43/radio_2055.c 			return e;
e                10196 drivers/net/wireless/broadcom/b43/radio_2056.c 				 const struct b2056_inittab_entry *e,
e                10202 drivers/net/wireless/broadcom/b43/radio_2056.c 	for (i = 0; i < length; i++, e++) {
e                10203 drivers/net/wireless/broadcom/b43/radio_2056.c 		if (!(e->flags & B2056_INITTAB_ENTRY_OK))
e                10205 drivers/net/wireless/broadcom/b43/radio_2056.c 		if ((e->flags & B2056_INITTAB_UPLOAD) || ignore_uploadflag) {
e                10207 drivers/net/wireless/broadcom/b43/radio_2056.c 				value = e->ghz5;
e                10209 drivers/net/wireless/broadcom/b43/radio_2056.c 				value = e->ghz2;
e                10241 drivers/net/wireless/broadcom/b43/radio_2056.c 	const struct b2056_inittab_entry *e;
e                10249 drivers/net/wireless/broadcom/b43/radio_2056.c 	e = &pts->syn[B2056_SYN_PLL_CP2];
e                10251 drivers/net/wireless/broadcom/b43/radio_2056.c 	b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
e                10258 drivers/net/wireless/broadcom/b43/radio_2056.c 	const struct b43_nphy_channeltab_entry_rev3 *e;
e                10263 drivers/net/wireless/broadcom/b43/radio_2056.c 		e = b43_nphy_channeltab_phy_rev3;
e                10267 drivers/net/wireless/broadcom/b43/radio_2056.c 		e = b43_nphy_channeltab_phy_rev4;
e                10273 drivers/net/wireless/broadcom/b43/radio_2056.c 			e = b43_nphy_channeltab_radio_rev5;
e                10277 drivers/net/wireless/broadcom/b43/radio_2056.c 			e = b43_nphy_channeltab_radio_rev6;
e                10282 drivers/net/wireless/broadcom/b43/radio_2056.c 			e = b43_nphy_channeltab_radio_rev7_9;
e                10286 drivers/net/wireless/broadcom/b43/radio_2056.c 			e = b43_nphy_channeltab_radio_rev8;
e                10290 drivers/net/wireless/broadcom/b43/radio_2056.c 			e = b43_nphy_channeltab_radio_rev11;
e                10299 drivers/net/wireless/broadcom/b43/radio_2056.c 	for (i = 0; i < length; i++, e++) {
e                10300 drivers/net/wireless/broadcom/b43/radio_2056.c 		if (e->freq == freq)
e                10301 drivers/net/wireless/broadcom/b43/radio_2056.c 			return e;
e                 341 drivers/net/wireless/broadcom/b43/radio_2059.c 	const struct b43_phy_ht_channeltab_e_radio2059 *e;
e                 344 drivers/net/wireless/broadcom/b43/radio_2059.c 	e = b43_phy_ht_channeltab_radio2059;
e                 345 drivers/net/wireless/broadcom/b43/radio_2059.c 	for (i = 0; i < ARRAY_SIZE(b43_phy_ht_channeltab_radio2059); i++, e++) {
e                 346 drivers/net/wireless/broadcom/b43/radio_2059.c 		if (e->freq == freq)
e                 347 drivers/net/wireless/broadcom/b43/radio_2059.c 			return e;
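radio_2055.c, radio_2056.c and radio_2059.c all resolve a channel the same way: pick the table matching the radio revision, then scan it linearly for the entry whose channel or freq field matches, returning NULL on a miss (the same shape reappears in the xmit.c bitrate lookup further down). A minimal sketch of that lookup; the table contents here are made up:

    #include <stddef.h>

    struct chantab_entry {
        unsigned short freq;     /* channel center frequency, MHz */
        unsigned short reg_val;  /* whatever gets programmed for it */
    };

    static const struct chantab_entry chantab[] = {
        { 2412, 0x10 }, { 2437, 0x22 }, { 2462, 0x35 },
    };

    /* Linear lookup by frequency, as in the radio_2059.c loop above. */
    static const struct chantab_entry *chantab_find(unsigned short freq)
    {
        const struct chantab_entry *e = chantab;
        size_t i;

        for (i = 0; i < sizeof(chantab) / sizeof(chantab[0]); i++, e++)
            if (e->freq == freq)
                return e;
        return NULL;             /* unsupported channel */
    }

    int main(void)
    {
        return chantab_find(2437) ? 0 : 1;
    }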
e                 545 drivers/net/wireless/broadcom/b43/tables_lpphy.c 	const struct b206x_init_tab_entry *e;
e                 549 drivers/net/wireless/broadcom/b43/tables_lpphy.c 		e = &b2062_init_tab[i];
e                 551 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			if (!(e->flags & B206X_FLAG_G))
e                 553 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			b43_radio_write(dev, e->offset, e->value_g);
e                 555 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			if (!(e->flags & B206X_FLAG_A))
e                 557 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			b43_radio_write(dev, e->offset, e->value_a);
e                 564 drivers/net/wireless/broadcom/b43/tables_lpphy.c 	const struct b206x_init_tab_entry *e;
e                 568 drivers/net/wireless/broadcom/b43/tables_lpphy.c 		e = &b2063_init_tab[i];
e                 570 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			if (!(e->flags & B206X_FLAG_G))
e                 572 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			b43_radio_write(dev, e->offset, e->value_g);
e                 574 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			if (!(e->flags & B206X_FLAG_A))
e                 576 drivers/net/wireless/broadcom/b43/tables_lpphy.c 			b43_radio_write(dev, e->offset, e->value_a);
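The b2062/b2063 init loops in tables_lpphy.c (and the b2055/b2056 ones above them) are flag-gated, table-driven register writes: each entry carries a value per band plus flags saying which bands it applies to, and the loop writes only the entries whose flags match the current band. A sketch of that pattern; the flag names are modeled on the B206X_FLAG_G/B206X_FLAG_A tests above and radio_write() is a stub:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_2GHZ 0x01   /* entry applies to the 2.4 GHz band */
    #define FLAG_5GHZ 0x02   /* entry applies to the 5 GHz band */

    struct init_tab_entry {
        uint16_t offset;
        uint16_t value_g;    /* 2.4 GHz value */
        uint16_t value_a;    /* 5 GHz value */
        uint8_t  flags;
    };

    static const struct init_tab_entry init_tab[] = {
        { 0x02, 0x80, 0x00, FLAG_2GHZ | FLAG_5GHZ },
        { 0x09, 0x01, 0x02, FLAG_5GHZ },
    };

    static void radio_write(uint16_t offset, uint16_t value)
    {
        printf("radio[0x%02x] = 0x%02x\n", offset, value);
    }

    static void radio_init_table(int ghz5)
    {
        for (size_t i = 0; i < sizeof(init_tab) / sizeof(init_tab[0]); i++) {
            const struct init_tab_entry *e = &init_tab[i];

            if (!(e->flags & (ghz5 ? FLAG_5GHZ : FLAG_2GHZ)))
                continue;    /* entry not meant for this band */
            radio_write(e->offset, ghz5 ? e->value_a : e->value_g);
        }
    }

    int main(void)
    {
        radio_init_table(0); /* program the 2.4 GHz values */
        return 0;
    }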
e                3769 drivers/net/wireless/broadcom/b43/tables_nphy.c 	struct nphy_gain_ctl_workaround_entry *e;
e                3784 drivers/net/wireless/broadcom/b43/tables_nphy.c 	e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
e                3798 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->cliplo_gain = gain_data[tr_iso];
e                3804 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->cliplo_gain = gain_data[tr_iso];
e                3808 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[0] &= ~0x4000;
e                3809 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[1] &= ~0x4000;
e                3810 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[2] &= ~0x4000;
e                3811 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[3] &= ~0x4000;
e                3812 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->init_gain &= ~0x4000;
e                3817 drivers/net/wireless/broadcom/b43/tables_nphy.c 				e->crsminu = 0x2d;
e                3819 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[0] &= ~0x4000;
e                3820 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[1] &= ~0x4000;
e                3821 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[2] &= ~0x4000;
e                3822 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[3] &= ~0x4000;
e                3823 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->init_gain &= ~0x4000;
e                3824 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[0] |= 0x1000;
e                3825 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[1] |= 0x1000;
e                3826 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[2] |= 0x1000;
e                3827 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->rfseq_init[3] |= 0x1000;
e                3828 drivers/net/wireless/broadcom/b43/tables_nphy.c 			e->init_gain |= 0x1000;
e                3832 drivers/net/wireless/broadcom/b43/tables_nphy.c 	return e;
e                3838 drivers/net/wireless/broadcom/b43/tables_nphy.c 	const struct nphy_rf_control_override_rev7 *e;
e                3843 drivers/net/wireless/broadcom/b43/tables_nphy.c 		e = tbl_rf_control_override_rev7_over0;
e                3847 drivers/net/wireless/broadcom/b43/tables_nphy.c 		e = tbl_rf_control_override_rev7_over1;
e                3851 drivers/net/wireless/broadcom/b43/tables_nphy.c 		e = tbl_rf_control_override_rev7_over2;
e                3860 drivers/net/wireless/broadcom/b43/tables_nphy.c 		if (e[i].field == field)
e                3861 drivers/net/wireless/broadcom/b43/tables_nphy.c 			return &e[i];
e                  40 drivers/net/wireless/broadcom/b43/xmit.c 	const struct b43_tx_legacy_rate_phy_ctl_entry *e;
e                  44 drivers/net/wireless/broadcom/b43/xmit.c 		e = &(b43_tx_legacy_rate_phy_ctl[i]);
e                  45 drivers/net/wireless/broadcom/b43/xmit.c 		if (e->bitrate == bitrate)
e                  46 drivers/net/wireless/broadcom/b43/xmit.c 			return e;
e                 170 drivers/net/wireless/broadcom/b43/xmit.c 	const struct b43_tx_legacy_rate_phy_ctl_entry *e;
e                 184 drivers/net/wireless/broadcom/b43/xmit.c 		e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
e                 185 drivers/net/wireless/broadcom/b43/xmit.c 		if (e) {
e                 186 drivers/net/wireless/broadcom/b43/xmit.c 			control |= e->coding_rate;
e                 187 drivers/net/wireless/broadcom/b43/xmit.c 			control |= e->modulation;
e                 341 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	struct b43legacy_dfsentry *e = dev->dfsentry;
e                 345 drivers/net/wireless/broadcom/b43legacy/debugfs.c 		debugfs_remove(e->dyn_debug_dentries[i]);
e                 350 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	struct b43legacy_dfsentry *e = dev->dfsentry;
e                 353 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e->dyn_debug[id] = (initstate);				\
e                 354 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e->dyn_debug_dentries[id] =				\
e                 355 drivers/net/wireless/broadcom/b43legacy/debugfs.c 		debugfs_create_bool(name, 0600, e->subdir,	\
e                 356 drivers/net/wireless/broadcom/b43legacy/debugfs.c 				&(e->dyn_debug[id]));		\
e                 370 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	struct b43legacy_dfsentry *e;
e                 375 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e = kzalloc(sizeof(*e), GFP_KERNEL);
e                 376 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	if (!e) {
e                 380 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e->dev = dev;
e                 381 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	log = &e->txstatlog;
e                 386 drivers/net/wireless/broadcom/b43legacy/debugfs.c 		kfree(e);
e                 392 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	dev->dfsentry = e;
e                 395 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e->subdir = debugfs_create_dir(devdir, rootdir);
e                 399 drivers/net/wireless/broadcom/b43legacy/debugfs.c 		e->file_##name.dentry =				\
e                 401 drivers/net/wireless/broadcom/b43legacy/debugfs.c 					mode, e->subdir, dev,	\
e                 403 drivers/net/wireless/broadcom/b43legacy/debugfs.c 		e->file_##name.dentry = NULL;			\
e                 420 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	struct b43legacy_dfsentry *e;
e                 424 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	e = dev->dfsentry;
e                 425 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	if (!e)
e                 429 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->file_tsf.dentry);
e                 430 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->file_ucode_regs.dentry);
e                 431 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->file_shm.dentry);
e                 432 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->file_txstat.dentry);
e                 433 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->file_restart.dentry);
e                 435 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	debugfs_remove(e->subdir);
e                 436 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	kfree(e->txstatlog.log);
e                 437 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	kfree(e);
e                 443 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	struct b43legacy_dfsentry *e = dev->dfsentry;
e                 448 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	if (!e)
e                 450 drivers/net/wireless/broadcom/b43legacy/debugfs.c 	log = &e->txstatlog;
e                1245 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
e                1249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	switch (e->event_code) {
e                1253 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		reason = e->reason;
e                3105 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			     const struct brcmf_event_msg *e, void *data)
e                3119 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	status = e->status;
e                3132 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		if (e->datalen < sizeof(*escan_result_le)) {
e                3143 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		    escan_buflen > e->datalen ||
e                3340 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 				const struct brcmf_event_msg *e, void *data)
e                3356 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
e                3361 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (e->event_code == BRCMF_E_PFN_NET_LOST) {
e                3381 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
e                3514 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
e                3524 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
e                3531 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (e->event_code == BRCMF_E_PFN_NET_LOST) {
e                5087 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			     const struct brcmf_event_msg *e, void *data)
e                5089 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	switch (e->reason) {
e                5095 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		brcmf_proto_add_tdls_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
e                5099 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
e                5350 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			    const struct brcmf_event_msg *e)
e                5352 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 event = e->event_code;
e                5353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 status = e->status;
e                5361 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		memcpy(vif->profile.bssid, e->addr, ETH_ALEN);
e                5377 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
e                5379 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 event = e->event_code;
e                5380 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u16 flags = e->flags;
e                5392 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			       const struct brcmf_event_msg *e)
e                5394 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 event = e->event_code;
e                5395 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 status = e->status;
e                5399 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			  e->flags & BRCMF_EVENT_MSG_LINK ? "up" : "down");
e                5497 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		       const struct brcmf_event_msg *e)
e                5515 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	memcpy(profile->bssid, e->addr, ETH_ALEN);
e                5564 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		       struct net_device *ndev, const struct brcmf_event_msg *e,
e                5602 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			       const struct brcmf_event_msg *e, void *data)
e                5606 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 event = e->event_code;
e                5607 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 reason = e->reason;
e                5631 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		sinfo->assoc_req_ies_len = e->datalen;
e                5634 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		cfg80211_new_sta(ndev, e->addr, sinfo, GFP_KERNEL);
e                5640 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
e                5647 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			    const struct brcmf_event_msg *e, void *data)
e                5655 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if ((e->event_code == BRCMF_E_DEAUTH) ||
e                5656 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	    (e->event_code == BRCMF_E_DEAUTH_IND) ||
e                5657 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	    (e->event_code == BRCMF_E_DISASSOC_IND) ||
e                5658 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	    ((e->event_code == BRCMF_E_LINK) && (!e->flags))) {
e                5659 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
e                5663 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
e                5664 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	} else if (brcmf_is_linkup(ifp->vif, e)) {
e                5667 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_inform_ibss(cfg, ndev, e->addr);
e                5669 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			memcpy(profile->bssid, e->addr, ETH_ALEN);
e                5670 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
e                5676 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_bss_connect_done(cfg, ndev, e, true);
e                5678 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	} else if (brcmf_is_linkdown(e)) {
e                5681 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_bss_connect_done(cfg, ndev, e, false);
e                5683 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 					brcmf_map_fw_linkdown_reason(e));
e                5689 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	} else if (brcmf_is_nonetwork(cfg, e)) {
e                5694 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_bss_connect_done(cfg, ndev, e, false);
e                5702 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			    const struct brcmf_event_msg *e, void *data)
e                5705 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 event = e->event_code;
e                5706 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 status = e->status;
e                5711 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_bss_roaming_done(cfg, ifp->ndev, e);
e                5713 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
e                5723 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			const struct brcmf_event_msg *e, void *data)
e                5725 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u16 flags = e->flags;
e                5733 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1,
e                5740 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 				  const struct brcmf_event_msg *e, void *data)
e                  53 drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c 	struct dentry *e;
e                  56 drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c 	e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
e                  58 drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c 	return PTR_ERR_OR_ZERO(e);
e                 104 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c 	const struct brcmf_feat_fwfeat *e;
e                 109 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c 		e = &brcmf_feat_fwfeat_map[i];
e                 110 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c 		if (!strcmp(e->fwid, drv->fwver)) {
e                 111 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c 			feat_flags = e->feat_flags;
e                1607 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				       const struct brcmf_event_msg *e,
e                1615 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
e                1616 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		bphy_err(drvr, "event payload too small (%d)\n", e->datalen);
e                1641 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 						const struct brcmf_event_msg *e,
e                 991 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 				     const struct brcmf_event_msg *e,
e                1334 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 				     const struct brcmf_event_msg *e,
e                1342 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
e                1353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	if (e->datalen < sizeof(*rxframe)) {
e                1373 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		    (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
e                1376 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 			    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
e                1415 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
e                1442 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 					const struct brcmf_event_msg *e,
e                1449 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		  e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
e                1451 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		  e->status);
e                1456 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
e                1457 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		if (e->status == BRCMF_E_STATUS_SUCCESS) {
e                1854 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 					  const struct brcmf_event_msg *e,
e                1868 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
e                1869 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		  e->reason);
e                1871 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	if (e->datalen < sizeof(*rxframe)) {
e                1880 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
e                1889 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	if (e->bsscfgidx == 0)
e                1905 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 	mgmt_frame_len = e->datalen - sizeof(*rxframe);
e                1914 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c 		  mgmt_frame_len, e->datalen, ch.chspec, freq);
e                 159 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h 				     const struct brcmf_event_msg *e,
e                 163 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h 				     const struct brcmf_event_msg *e,
e                 166 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h 					const struct brcmf_event_msg *e,
e                 174 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h 					  const struct brcmf_event_msg *e,
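Every brcmfmac handler above shares one shape: it receives a const struct brcmf_event_msg *e plus a payload pointer, switches on e->event_code, and (in the careful paths such as the escan, PFN and fwsignal handlers) validates e->datalen against the expected payload size before dereferencing data. A hedged sketch of that validate-before-use step; the struct and handler names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct event_msg {
        uint32_t event_code;
        uint32_t status;
        uint32_t datalen;    /* bytes valid behind the payload pointer */
    };

    struct scan_result { uint32_t buflen; };

    static int notify_scan(const struct event_msg *e, const void *data)
    {
        const struct scan_result *res = data;

        /* never trust the payload before checking datalen */
        if (e->datalen < sizeof(*res)) {
            fprintf(stderr, "event payload too small (%u)\n", e->datalen);
            return -1;
        }
        printf("scan event status %u buflen %u\n", e->status, res->buflen);
        return 0;
    }

    int main(void)
    {
        struct scan_result res = { .buflen = 128 };
        struct event_msg e = { .event_code = 1, .status = 0,
                               .datalen = sizeof(res) };

        return notify_scan(&e, &res);
    }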
e                5795 drivers/net/wireless/cisco/airo.c 	if(fwrq->e == 1) {
e                5799 drivers/net/wireless/cisco/airo.c 		fwrq->e = 0;
e                5803 drivers/net/wireless/cisco/airo.c 	if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
e                5846 drivers/net/wireless/cisco/airo.c 		fwrq->e = 1;
e                5849 drivers/net/wireless/cisco/airo.c 		fwrq->e = 0;
e                6904 drivers/net/wireless/cisco/airo.c 		range->freq[k++].e = 1;	/* Values in MHz -> * 10^5 * 10 */
e                7312 drivers/net/wireless/cisco/airo.c 	iwe.u.freq.e = 1;
e                2813 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	int e, i;
e                2829 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		e = txq->oldest;
e                2836 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		e = txq->oldest + frag_num;
e                2837 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		e %= txq->entries;
e                2880 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
e                2971 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	txq->oldest = (e + 1) % txq->entries;
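The ipw2100 tx-done path computes the completing entry as (oldest + frag_num) % entries and then runs a two-clause test against the firmware read/write indices; the two clauses are just the wrapped and unwrapped cases of a circular-window membership check. The wrap-aware test itself is the reusable piece (which side of the window counts as "done" is driver-specific):

    #include <stdbool.h>
    #include <stdio.h>

    /* Is index e inside the circular half-open window [r, w)? */
    static bool in_window(unsigned r, unsigned w, unsigned e)
    {
        if (r <= w)              /* window does not wrap */
            return e >= r && e < w;
        return e >= r || e < w;  /* window wraps past the array end */
    }

    int main(void)
    {
        printf("%d\n", in_window(3, 7, 5)); /* 1: inside          */
        printf("%d\n", in_window(7, 3, 1)); /* 1: inside, wrapped */
        printf("%d\n", in_window(7, 3, 5)); /* 0: outside         */
        return 0;
    }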
e                6640 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (fwrq->e == 1) {
e                6650 drivers/net/wireless/intel/ipw2x00/ipw2100.c 			fwrq->e = 0;
e                6655 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	if (fwrq->e > 0 || fwrq->m > 1000) {
e                6678 drivers/net/wireless/intel/ipw2x00/ipw2100.c 	wrqu->freq.e = 0;
e                6871 drivers/net/wireless/intel/ipw2x00/ipw2100.c 		range->freq[val].e = 1;
e                8664 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	if (fwrq->e == 1) {
e                8700 drivers/net/wireless/intel/ipw2x00/ipw2200.c 	wrqu->freq.e = 0;
e                8711 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		wrqu->freq.e = 1;
e                8869 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			range->freq[i].e = 1;
e                8882 drivers/net/wireless/intel/ipw2x00/ipw2200.c 			range->freq[i].e = 1;
e                 256 drivers/net/wireless/intel/ipw2x00/libipw_module.c 	struct proc_dir_entry *e;
e                 265 drivers/net/wireless/intel/ipw2x00/libipw_module.c 	e = proc_create("debug_level", 0644, libipw_proc,
e                 267 drivers/net/wireless/intel/ipw2x00/libipw_module.c 	if (!e) {
e                  94 drivers/net/wireless/intel/ipw2x00/libipw_wx.c 	iwe.u.freq.e = 6;
e                 289 drivers/net/wireless/intel/iwlegacy/3945.h 	s32 a, b, c, d, e;	/* coefficients for voltage->power
e                 698 drivers/net/wireless/intel/iwlegacy/common.c 	__le16 *e;
e                 711 drivers/net/wireless/intel/iwlegacy/common.c 	e = (__le16 *) il->eeprom;
e                 747 drivers/net/wireless/intel/iwlegacy/common.c 		e[addr / 2] = cpu_to_le16(r >> 16);
e                 348 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 	__le16 *e;
e                 367 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 	e = kmalloc(sz, GFP_KERNEL);
e                 368 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 	if (!e)
e                 411 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 			e[cache_addr / 2] = eeprom_data;
e                 432 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 			e[addr / 2] = cpu_to_le16(r >> 16);
e                 442 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 	*eeprom = (u8 *)e;
e                 448 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c 	kfree(e);
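iwlegacy and iwlwifi read their EEPROM through a 32-bit register whose upper halfword carries the data word, storing it little-endian at e[addr / 2]. A hedged userspace sketch of that unpacking; reg_read() is a stub that fakes the register image, and cpu_to_le16_() is a local stand-in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define EEPROM_SIZE 16   /* bytes, illustrative */

    /* Stub: pretend the upper 16 bits of the register hold the word. */
    static uint32_t reg_read(uint16_t addr)
    {
        return (uint32_t)(0xA000 + addr) << 16;
    }

    static uint16_t cpu_to_le16_(uint16_t v)
    {
        /* identity on little-endian hosts, byte swap on big-endian */
        const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

        return probe.b[0] ? v : (uint16_t)((v >> 8) | (v << 8));
    }

    int main(void)
    {
        uint16_t *e = malloc(EEPROM_SIZE);

        if (!e)
            return 1;
        for (uint16_t addr = 0; addr < EEPROM_SIZE; addr += 2)
            e[addr / 2] = cpu_to_le16_((uint16_t)(reg_read(addr) >> 16));
        printf("word 0: 0x%04x\n", e[0]);
        free(e);
        return 0;
    }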
e                  81 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	struct iwl_mvm_loc_entry *e, *t;
e                  87 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
e                  88 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		list_del(&e->list);
e                  89 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		kfree(e);
e                 700 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	struct _iwl_mvm_reorder_buf_entry e;
e                 569 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		struct sk_buff_head *skb_list = &entries[index].e.frames;
e                 595 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		while (skb_queue_empty(&entries[index].e.frames))
e                 599 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			  entries[index].e.reorder_time + 1 +
e                 628 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		if (skb_queue_empty(&entries[index].e.frames)) {
e                 637 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		    !time_after(jiffies, entries[index].e.reorder_time +
e                 672 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			  entries[index].e.reorder_time +
e                1020 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	tail = skb_peek_tail(&entries[index].e.frames);
e                1028 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	__skb_queue_tail(&entries[index].e.frames, skb);
e                1030 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	entries[index].e.reorder_time = jiffies;
e                2481 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			__skb_queue_purge(&entries[j].e.frames);
e                2520 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			__skb_queue_head_init(&entries[j].e.frames);
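The mvm entries sketch the 802.11 reorder buffer: each slot holds an skb queue (entries[index].e.frames) plus a reorder_time stamp, frames land in the slot for their sequence number, and delivery walks forward from the head releasing consecutive slots until it hits a hole, with the timestamp bounding how long a hole may stall things. A heavily reduced in-order-release sketch, one frame per slot and no timeout:

    #include <stdio.h>

    #define BUF_SIZE 8

    struct slot { int occupied; int seq; };

    static struct slot buf[BUF_SIZE];
    static int head_sn;   /* next sequence number owed to the stack */

    static void store(int sn)
    {
        struct slot *s = &buf[sn % BUF_SIZE];

        s->occupied = 1;
        s->seq = sn;
    }

    /* Deliver consecutive frames from head_sn; stop at the first hole. */
    static void release_in_order(void)
    {
        while (buf[head_sn % BUF_SIZE].occupied) {
            printf("deliver sn %d\n", buf[head_sn % BUF_SIZE].seq);
            buf[head_sn % BUF_SIZE].occupied = 0;
            head_sn++;
        }
    }

    int main(void)
    {
        store(1); store(2);   /* sn 0 missing: nothing can go out yet */
        release_in_order();
        store(0);
        release_in_order();   /* now 0, 1, 2 are delivered in order */
        return 0;
    }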
e                2455 drivers/net/wireless/intersil/hostap/hostap_ap.c 				iwe.u.freq.e = 1;
e                 816 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (freq->e == 1 &&
e                 823 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 				freq->e = 0;
e                 830 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT ||
e                 862 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 	freq->e = 1;
e                1007 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 			range->freq[val].e = 1;
e                1848 drivers/net/wireless/intersil/hostap/hostap_ioctl.c 		iwe.u.freq.e = 1;
e                1044 drivers/net/wireless/intersil/hostap/hostap_wlan.h #define prism2_callback(d, e) do { } while (0)
e                 435 drivers/net/wireless/intersil/orinoco/wext.c 	if ((frq->e == 0) && (frq->m <= 1000)) {
e                 444 drivers/net/wireless/intersil/orinoco/wext.c 		for (i = 0; i < (6 - frq->e); i++)
e                 484 drivers/net/wireless/intersil/orinoco/wext.c 	frq->e = 1;
e                 300 drivers/net/wireless/intersil/prism54/isl_ioctl.c 		c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0;
e                 320 drivers/net/wireless/intersil/prism54/isl_ioctl.c 	fwrq->e = 3;
e                 498 drivers/net/wireless/intersil/prism54/isl_ioctl.c 		range->freq[i].e = 6;
e                 629 drivers/net/wireless/intersil/prism54/isl_ioctl.c 	iwe.u.freq.e = 6;
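All of the wireless-extensions entries above (atmel, airo, ipw2x00, hostap, orinoco, prism54) juggle the same struct iw_freq encoding: the value is m * 10^e, a bare channel number is conventionally passed with e == 0 and a small m (the orinoco test "e == 0 && m <= 1000" above), and real frequencies scale m so that m * 10^e lands in Hz. A small decoder for that convention:

    #include <stdio.h>

    struct iw_freq_ { int m; short e; };  /* mirrors wext's m/e pair */

    /* Decode to Hz; by convention e == 0 with small m is a channel. */
    static long long iw_freq_to_hz(const struct iw_freq_ *f)
    {
        long long v = f->m;

        for (int i = 0; i < f->e; i++)
            v *= 10;
        return v;
    }

    int main(void)
    {
        struct iw_freq_ chan = { .m = 6,    .e = 0 }; /* channel 6 */
        struct iw_freq_ freq = { .m = 2437, .e = 6 }; /* 2437 MHz */

        if (chan.e == 0 && chan.m <= 1000)
            printf("channel %d\n", chan.m);
        printf("%lld Hz\n", iw_freq_to_hz(&freq));
        return 0;
    }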
e                  98 drivers/net/wireless/mediatek/mt76/dma.c 	struct mt76_queue_entry *e = &q->entry[idx];
e                 102 drivers/net/wireless/mediatek/mt76/dma.c 	if (!e->skip_buf0) {
e                 118 drivers/net/wireless/mediatek/mt76/dma.c 	if (e->txwi == DMA_DUMMY_DATA)
e                 119 drivers/net/wireless/mediatek/mt76/dma.c 		e->txwi = NULL;
e                 121 drivers/net/wireless/mediatek/mt76/dma.c 	if (e->skb == DMA_DUMMY_DATA)
e                 122 drivers/net/wireless/mediatek/mt76/dma.c 		e->skb = NULL;
e                 124 drivers/net/wireless/mediatek/mt76/dma.c 	*prev_e = *e;
e                 125 drivers/net/wireless/mediatek/mt76/dma.c 	memset(e, 0, sizeof(*e));
e                 209 drivers/net/wireless/mediatek/mt76/dma.c 	struct mt76_queue_entry *e = &q->entry[idx];
e                 212 drivers/net/wireless/mediatek/mt76/dma.c 	void *buf = e->buf;
e                 226 drivers/net/wireless/mediatek/mt76/dma.c 	e->buf = NULL;
e                 290 drivers/net/wireless/mediatek/mt76/dma.c 	struct mt76_queue_entry e;
e                 353 drivers/net/wireless/mediatek/mt76/dma.c 	e.skb = tx_info.skb;
e                 354 drivers/net/wireless/mediatek/mt76/dma.c 	e.txwi = t;
e                 355 drivers/net/wireless/mediatek/mt76/dma.c 	dev->drv->tx_complete_skb(dev, qid, &e);
e                 300 drivers/net/wireless/mediatek/mt76/mt76.h 				struct mt76_queue_entry *e);
e                1154 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 			    struct mt76_queue_entry *e)
e                1157 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct sk_buff *skb = e->skb;
e                1159 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	if (!e->txwi) {
e                 233 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h 			    struct mt76_queue_entry *e);
e                 223 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			    struct mt76_queue_entry *e)
e                 225 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (!e->txwi) {
e                 226 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		dev_kfree_skb_any(e->skb);
e                 231 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (e->skb == DMA_DUMMY_DATA) {
e                 237 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		txp = mt7615_txwi_to_txp(mdev, e->txwi);
e                 242 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		e->skb = t ? t->skb : NULL;
e                 245 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (e->skb)
e                 246 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_tx_complete_skb(mdev, e->skb);
e                 259 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h 			    struct mt76_queue_entry *e);
e                 827 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			     struct mt76_queue_entry *e)
e                 833 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	if (!e->txwi) {
e                 834 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		dev_kfree_skb_any(e->skb);
e                 840 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
e                 844 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76_tx_complete_skb(mdev, e->skb);
e                 191 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h 			     struct mt76_queue_entry *e);
e                  22 drivers/net/wireless/mediatek/mt76/mt76x02_usb.h 			      struct mt76_queue_entry *e);
e                  19 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 			      struct mt76_queue_entry *e)
e                  21 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76x02u_remove_dma_hdr(e->skb);
e                  22 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76_tx_complete_skb(mdev, e->skb);
e                 324 drivers/net/wireless/mediatek/mt76/usb.c mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
e                 332 drivers/net/wireless/mediatek/mt76/usb.c 	e->urb = kzalloc(size, GFP_KERNEL);
e                 333 drivers/net/wireless/mediatek/mt76/usb.c 	if (!e->urb)
e                 336 drivers/net/wireless/mediatek/mt76/usb.c 	usb_init_urb(e->urb);
e                 339 drivers/net/wireless/mediatek/mt76/usb.c 		e->urb->sg = (struct scatterlist *)(e->urb + 1);
e                 345 drivers/net/wireless/mediatek/mt76/usb.c mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
e                 349 drivers/net/wireless/mediatek/mt76/usb.c 	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
e                 353 drivers/net/wireless/mediatek/mt76/usb.c 	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
e                 739 drivers/net/wireless/mediatek/mt76/usb.c 	struct mt76_queue_entry *e = urb->context;
e                 743 drivers/net/wireless/mediatek/mt76/usb.c 	e->done = true;
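
The mt76 hits share one tx-completion pattern: each descriptor slot is a struct mt76_queue_entry holding the skb and txwi, placeholder pointers are marked with the DMA_DUMMY_DATA sentinel and nulled before the entry reaches the driver's tx_complete_skb callback, and the slot is then zeroed for reuse. A simplified sketch of that slot recycling (types are illustrative):

    #include <stddef.h>
    #include <string.h>

    #define DUMMY_SENTINEL ((void *)0x1) /* stands in for DMA_DUMMY_DATA */

    struct queue_entry {
            void *skb;
            void *txwi;
    };

    static void queue_slot_cleanup(struct queue_entry *slot,
                                   struct queue_entry *out)
    {
            if (slot->txwi == DUMMY_SENTINEL)
                    slot->txwi = NULL;
            if (slot->skb == DUMMY_SENTINEL)
                    slot->skb = NULL;

            *out = *slot;                   /* copy out for the completion hook */
            memset(slot, 0, sizeof(*slot)); /* slot is now free for reuse */
    }
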
e                  12 drivers/net/wireless/mediatek/mt7601u/dma.c 				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
e                 128 drivers/net/wireless/mediatek/mt7601u/dma.c mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
e                 130 drivers/net/wireless/mediatek/mt7601u/dma.c 	u32 seg_len, data_len = e->urb->actual_length;
e                 131 drivers/net/wireless/mediatek/mt7601u/dma.c 	u8 *data = page_address(e->p);
e                 143 drivers/net/wireless/mediatek/mt7601u/dma.c 		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
e                 155 drivers/net/wireless/mediatek/mt7601u/dma.c 		__free_pages(e->p, MT_RX_ORDER);
e                 157 drivers/net/wireless/mediatek/mt7601u/dma.c 		e->p = new_p;
e                 173 drivers/net/wireless/mediatek/mt7601u/dma.c 	buf = &q->e[q->start];
e                 205 drivers/net/wireless/mediatek/mt7601u/dma.c 	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
e                 218 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct mt7601u_dma_buf_rx *e;
e                 220 drivers/net/wireless/mediatek/mt7601u/dma.c 	while ((e = mt7601u_rx_get_pending_entry(dev))) {
e                 221 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (e->urb->status)
e                 224 drivers/net/wireless/mediatek/mt7601u/dma.c 		mt7601u_rx_process_entry(dev, e);
e                 225 drivers/net/wireless/mediatek/mt7601u/dma.c 		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
e                 250 drivers/net/wireless/mediatek/mt7601u/dma.c 	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
e                 253 drivers/net/wireless/mediatek/mt7601u/dma.c 	skb = q->e[q->start].skb;
e                 254 drivers/net/wireless/mediatek/mt7601u/dma.c 	q->e[q->start].skb = NULL;
e                 300 drivers/net/wireless/mediatek/mt7601u/dma.c 	struct mt7601u_dma_buf_tx *e;
e                 312 drivers/net/wireless/mediatek/mt7601u/dma.c 	e = &q->e[q->end];
e                 313 drivers/net/wireless/mediatek/mt7601u/dma.c 	e->skb = skb;
e                 314 drivers/net/wireless/mediatek/mt7601u/dma.c 	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
e                 316 drivers/net/wireless/mediatek/mt7601u/dma.c 	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
e                 384 drivers/net/wireless/mediatek/mt7601u/dma.c 		usb_poison_urb(dev->rx_q.e[i].urb);
e                 388 drivers/net/wireless/mediatek/mt7601u/dma.c 				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
e                 391 drivers/net/wireless/mediatek/mt7601u/dma.c 	u8 *buf = page_address(e->p);
e                 397 drivers/net/wireless/mediatek/mt7601u/dma.c 	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
e                 400 drivers/net/wireless/mediatek/mt7601u/dma.c 	trace_mt_submit_urb(dev, e->urb);
e                 401 drivers/net/wireless/mediatek/mt7601u/dma.c 	ret = usb_submit_urb(e->urb, gfp);
e                 413 drivers/net/wireless/mediatek/mt7601u/dma.c 		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
e                 426 drivers/net/wireless/mediatek/mt7601u/dma.c 		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
e                 427 drivers/net/wireless/mediatek/mt7601u/dma.c 		usb_free_urb(dev->rx_q.e[i].urb);
e                 440 drivers/net/wireless/mediatek/mt7601u/dma.c 		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
e                 441 drivers/net/wireless/mediatek/mt7601u/dma.c 		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
e                 443 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
e                 455 drivers/net/wireless/mediatek/mt7601u/dma.c 		usb_poison_urb(q->e[i].urb);
e                 456 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (q->e[i].skb)
e                 457 drivers/net/wireless/mediatek/mt7601u/dma.c 			mt7601u_tx_status(q->dev, q->e[i].skb);
e                 458 drivers/net/wireless/mediatek/mt7601u/dma.c 		usb_free_urb(q->e[i].urb);
e                 482 drivers/net/wireless/mediatek/mt7601u/dma.c 		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
e                 483 drivers/net/wireless/mediatek/mt7601u/dma.c 		if (!q->e[i].urb)
e                  73 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 	} e[N_RX_ENTRIES];
e                  89 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 	} e[N_TX_ENTRIES];
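
mt7601u keeps rx and tx state in fixed arrays of entries (e[N_RX_ENTRIES], e[N_TX_ENTRIES]) walked by start/end cursors, and the completion handlers WARN when the finishing urb is not the one at the cursor. A bare sketch of that cursor ring:

    #define N_ENTRIES 8 /* illustrative; the driver sizes rx and tx separately */

    struct urb_ring {
            struct { void *urb; } e[N_ENTRIES];
            unsigned int start; /* oldest in-flight entry */
            unsigned int end;   /* next free entry */
    };

    /* Completion side: the finished urb must be the oldest in flight. */
    static int ring_complete(struct urb_ring *q, const void *urb)
    {
            if (q->e[q->start].urb != urb)
                    return -1; /* mismatch: a WARN in the real driver */
            q->start = (q->start + 1) % N_ENTRIES;
            return 0;
    }
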
e                1036 drivers/net/wireless/ray_cs.c 	if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
e                1054 drivers/net/wireless/ray_cs.c 	wrqu->freq.e = 0;
e                 166 drivers/net/wireless/realtek/rtlwifi/debug.c RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
e                 605 drivers/net/wireless/realtek/rtw88/debug.c rtw_debug_impl_bb(e, 0x0e00);
e                  23 drivers/net/wireless/realtek/rtw88/rtw8822b.c 	ether_addr_copy(efuse->addr, map->e.mac_addr);
e                  96 drivers/net/wireless/realtek/rtw88/rtw8822b.h 		struct rtw8822be_efuse e;
e                  24 drivers/net/wireless/realtek/rtw88/rtw8822c.c 	ether_addr_copy(efuse->addr, map->e.mac_addr);
e                  95 drivers/net/wireless/realtek/rtw88/rtw8822c.h 		struct rtw8822ce_efuse e;
e                1467 drivers/net/wireless/wl3501_cs.c 	wrqu->freq.e = 1;
e                1594 drivers/net/wireless/wl3501_cs.c 		iwe.u.freq.e = 0;
e                 911 drivers/net/wireless/zydas/zd1201.c 	if (freq->e == 0)
e                 935 drivers/net/wireless/zydas/zd1201.c 	freq->e = 0;
e                1161 drivers/net/wireless/zydas/zd1201.c 		iwe.u.freq.e = 0;
e                 624 drivers/net/xen-netback/xenbus.c 	char *s, *e;
e                 637 drivers/net/xen-netback/xenbus.c 	b = simple_strtoul(s, &e, 10);
e                 638 drivers/net/xen-netback/xenbus.c 	if ((s == e) || (*e != ','))
e                 641 drivers/net/xen-netback/xenbus.c 	s = e + 1;
e                 642 drivers/net/xen-netback/xenbus.c 	u = simple_strtoul(s, &e, 10);
e                 643 drivers/net/xen-netback/xenbus.c 	if ((s == e) || (*e != '\0'))
e                 659 drivers/net/xen-netback/xenbus.c 	char *s, *e, *macstr;
e                 667 drivers/net/xen-netback/xenbus.c 		mac[i] = simple_strtoul(s, &e, 16);
e                 668 drivers/net/xen-netback/xenbus.c 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
e                 672 drivers/net/xen-netback/xenbus.c 		s = e+1;
e                1443 drivers/net/xen-netfront.c 	char *s, *e, *macstr;
e                1451 drivers/net/xen-netfront.c 		mac[i] = simple_strtoul(s, &e, 16);
e                1452 drivers/net/xen-netfront.c 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
e                1456 drivers/net/xen-netfront.c 		s = e+1;
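
Both xen drivers parse an "aa:bb:cc:dd:ee:ff" MAC string the same way: simple_strtoul per byte, then a check that digits were consumed (s == e catches none) and that the terminator is ':' for the first five bytes and NUL for the last. A userspace equivalent with strtoul:

    #include <stdlib.h>

    #define ETH_ALEN 6

    /* Returns 0 on success, -1 on malformed input; same endptr checks as
     * the xen code above. */
    static int parse_mac(const char *macstr, unsigned char *mac)
    {
            const char *s = macstr;
            char *e;
            int i;

            for (i = 0; i < ETH_ALEN; i++) {
                    mac[i] = (unsigned char)strtoul(s, &e, 16);
                    if (s == e || *e != ((i == ETH_ALEN - 1) ? '\0' : ':'))
                            return -1; /* no digits, or wrong separator */
                    s = e + 1;
            }
            return 0;
    }
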
e                 906 drivers/nvdimm/label.c 	struct nd_label_ent *label_ent, *e;
e                1080 drivers/nvdimm/label.c 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
e                1200 drivers/nvdimm/label.c 	struct nd_label_ent *label_ent, *e;
e                1216 drivers/nvdimm/label.c 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
e                2305 drivers/nvdimm/namespace_devs.c 	struct nd_label_ent *label_ent, *e;
e                2310 drivers/nvdimm/namespace_devs.c 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
e                2401 drivers/nvdimm/namespace_devs.c 			struct list_head *l, *e;
e                2412 drivers/nvdimm/namespace_devs.c 			list_for_each_safe(l, e, &nd_mapping->labels) {
e                 708 drivers/nvdimm/region_devs.c 	struct nd_label_ent *label_ent, *e;
e                 711 drivers/nvdimm/region_devs.c 	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
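
In the nvdimm hits, e is the spare cursor that list_for_each_entry_safe requires: it caches the next node so the current one can be unlinked and freed mid-walk. The same lookahead idiom on a plain singly-linked list:

    #include <stdlib.h>

    struct node {
            struct node *next;
    };

    static void free_all(struct node **head)
    {
            struct node *cur, *e; /* e: lookahead, as in the nvdimm walks */

            for (cur = *head; cur; cur = e) {
                    e = cur->next; /* grab next before cur is freed */
                    free(cur);
            }
            *head = NULL;
    }
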
e                 108 drivers/nvme/target/discovery.c 	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
e                 110 drivers/nvme/target/discovery.c 	e->trtype = port->disc_addr.trtype;
e                 111 drivers/nvme/target/discovery.c 	e->adrfam = port->disc_addr.adrfam;
e                 112 drivers/nvme/target/discovery.c 	e->treq = port->disc_addr.treq;
e                 113 drivers/nvme/target/discovery.c 	e->portid = port->disc_addr.portid;
e                 115 drivers/nvme/target/discovery.c 	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
e                 116 drivers/nvme/target/discovery.c 	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
e                 117 drivers/nvme/target/discovery.c 	e->subtype = type;
e                 118 drivers/nvme/target/discovery.c 	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
e                 119 drivers/nvme/target/discovery.c 	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
e                 120 drivers/nvme/target/discovery.c 	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
e                 121 drivers/nvme/target/discovery.c 	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
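
The nvmet snippet fills one discovery log page entry in wire format: multi-byte fields are stored little-endian via cpu_to_le16() and string fields are fixed-width within the record. A portable sketch of such an endian-explicit fill (layout and field names are illustrative, not the NVMe spec's):

    #include <stdint.h>
    #include <string.h>

    struct disc_entry {
            uint8_t trtype;
            uint8_t adrfam;
            uint8_t cntlid_le[2]; /* 16-bit little-endian on the wire */
            char    subnqn[32];   /* fixed-width, NUL-padded */
    };

    static void put_le16(uint8_t *p, uint16_t v)
    {
            p[0] = v & 0xff;
            p[1] = v >> 8;
    }

    static void fill_entry(struct disc_entry *e, uint8_t trtype,
                           uint8_t adrfam, uint16_t cntlid, const char *nqn)
    {
            memset(e, 0, sizeof(*e));
            e->trtype = trtype;
            e->adrfam = adrfam;
            put_le16(e->cntlid_le, cntlid);
            strncpy(e->subnqn, nqn, sizeof(e->subnqn) - 1);
    }
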
e                 165 drivers/oprofile/cpu_buffer.c 	struct ring_buffer_event *e;
e                 166 drivers/oprofile/cpu_buffer.c 	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
e                 167 drivers/oprofile/cpu_buffer.c 	if (!e)
e                 170 drivers/oprofile/cpu_buffer.c 	entry->event = e;
e                 171 drivers/oprofile/cpu_buffer.c 	entry->sample = ring_buffer_event_data(e);
e                 172 drivers/oprofile/cpu_buffer.c 	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
e                 479 drivers/pci/pcie/aer.c #define AER_GET_AGENT(t, e)						\
e                 480 drivers/pci/pcie/aer.c 	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
e                 481 drivers/pci/pcie/aer.c 	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
e                 482 drivers/pci/pcie/aer.c 	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
e                 497 drivers/pci/pcie/aer.c #define AER_GET_LAYER_ERROR(t, e)					\
e                 498 drivers/pci/pcie/aer.c 	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
e                 499 drivers/pci/pcie/aer.c 	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
e                 407 drivers/pci/proc.c 	struct proc_dir_entry *e;
e                 426 drivers/pci/proc.c 	e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
e                 428 drivers/pci/proc.c 	if (!e)
e                 430 drivers/pci/proc.c 	proc_set_size(e, dev->cfg_size);
e                 431 drivers/pci/proc.c 	dev->procent = e;
e                 793 drivers/perf/arm-ccn.c 		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
e                 797 drivers/perf/arm-ccn.c 		if (!arm_ccn_pmu_type_eq(type, e->type))
e                 799 drivers/perf/arm-ccn.c 		if (event_id != e->event)
e                 801 drivers/perf/arm-ccn.c 		if (e->num_ports && port >= e->num_ports) {
e                 806 drivers/perf/arm-ccn.c 		if (e->num_vcs && vc >= e->num_vcs) {
e                  23 drivers/pinctrl/intel/pinctrl-broxton.c #define BXT_COMMUNITY(s, e)				\
e                  32 drivers/pinctrl/intel/pinctrl-broxton.c 		.npins = ((e) - (s) + 1),		\
e                  25 drivers/pinctrl/intel/pinctrl-cannonlake.c #define CNL_GPP(r, s, e, g)				\
e                  29 drivers/pinctrl/intel/pinctrl-cannonlake.c 		.size = ((e) - (s) + 1),		\
e                  35 drivers/pinctrl/intel/pinctrl-cannonlake.c #define CNL_COMMUNITY(b, s, e, o, g)			\
e                  44 drivers/pinctrl/intel/pinctrl-cannonlake.c 		.npins = ((e) - (s) + 1),		\
e                  49 drivers/pinctrl/intel/pinctrl-cannonlake.c #define CNLLP_COMMUNITY(b, s, e, g)			\
e                  50 drivers/pinctrl/intel/pinctrl-cannonlake.c 	CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
e                  52 drivers/pinctrl/intel/pinctrl-cannonlake.c #define CNLH_COMMUNITY(b, s, e, g)			\
e                  53 drivers/pinctrl/intel/pinctrl-cannonlake.c 	CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
e                  23 drivers/pinctrl/intel/pinctrl-cedarfork.c #define CDF_GPP(r, s, e)				\
e                  27 drivers/pinctrl/intel/pinctrl-cedarfork.c 		.size = ((e) - (s) + 1),		\
e                  30 drivers/pinctrl/intel/pinctrl-cedarfork.c #define CDF_COMMUNITY(b, s, e, g)			\
e                  39 drivers/pinctrl/intel/pinctrl-cedarfork.c 		.npins = ((e) - (s) + 1),		\
e                  23 drivers/pinctrl/intel/pinctrl-denverton.c #define DNV_GPP(n, s, e)				\
e                  27 drivers/pinctrl/intel/pinctrl-denverton.c 		.size = ((e) - (s) + 1),		\
e                  30 drivers/pinctrl/intel/pinctrl-denverton.c #define DNV_COMMUNITY(b, s, e, g)			\
e                  39 drivers/pinctrl/intel/pinctrl-denverton.c 		.npins = ((e) - (s) + 1),		\
e                  23 drivers/pinctrl/intel/pinctrl-geminilake.c #define GLK_COMMUNITY(s, e)				\
e                  32 drivers/pinctrl/intel/pinctrl-geminilake.c 		.npins = ((e) - (s) + 1),		\
e                  24 drivers/pinctrl/intel/pinctrl-icelake.c #define ICL_GPP(r, s, e, g)				\
e                  28 drivers/pinctrl/intel/pinctrl-icelake.c 		.size = ((e) - (s) + 1),		\
e                  34 drivers/pinctrl/intel/pinctrl-icelake.c #define ICL_COMMUNITY(b, s, e, g)			\
e                  43 drivers/pinctrl/intel/pinctrl-icelake.c 		.npins = ((e) - (s) + 1),		\
e                  23 drivers/pinctrl/intel/pinctrl-lewisburg.c #define LBG_COMMUNITY(b, s, e)				\
e                  33 drivers/pinctrl/intel/pinctrl-lewisburg.c 		.npins = ((e) - (s) + 1),		\
e                  72 drivers/pinctrl/intel/pinctrl-merrifield.c #define MRFLD_FAMILY(b, s, e)				\
e                  76 drivers/pinctrl/intel/pinctrl-merrifield.c 		.npins = (e) - (s) + 1,			\
e                  79 drivers/pinctrl/intel/pinctrl-merrifield.c #define MRFLD_FAMILY_PROTECTED(b, s, e)			\
e                  83 drivers/pinctrl/intel/pinctrl-merrifield.c 		.npins = (e) - (s) + 1,			\
e                  25 drivers/pinctrl/intel/pinctrl-sunrisepoint.c #define SPT_COMMUNITY(b, s, e)				\
e                  36 drivers/pinctrl/intel/pinctrl-sunrisepoint.c 		.npins = ((e) - (s) + 1),		\
e                  39 drivers/pinctrl/intel/pinctrl-sunrisepoint.c #define SPTH_GPP(r, s, e, g)				\
e                  43 drivers/pinctrl/intel/pinctrl-sunrisepoint.c 		.size = ((e) - (s) + 1),		\
e                  47 drivers/pinctrl/intel/pinctrl-sunrisepoint.c #define SPTH_COMMUNITY(b, s, e, g)			\
e                  56 drivers/pinctrl/intel/pinctrl-sunrisepoint.c 		.npins = ((e) - (s) + 1),		\
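
Every Intel pinctrl GPP/community macro above computes its size as (e) - (s) + 1 because s and e name an inclusive pin range. A stand-alone form of the same macro:

    struct pin_range {
            unsigned int base;
            unsigned int npins;
    };

    /* (s, e) are inclusive endpoints, hence the + 1. */
    #define PIN_RANGE(s, e) { .base = (s), .npins = ((e) - (s) + 1) }

    static const struct pin_range demo = PIN_RANGE(5, 12); /* pins 5..12 = 8 pins */
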
e                  69 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	const struct mtk_pin_field_calc *c, *e;
e                  83 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	e = c + rc->nranges;
e                  85 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	while (c < e) {
e                  91 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c 	if (c >= e) {
e                 897 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c #define NPCM7XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k) \
e                 899 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c 			.fn1 = fn_ ## e, .reg1 = NPCM7XX_GCR_ ## f, .bit1 = g, \
e                 384 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,0,      R,     R,     R,    EMC,   GPIO,   CAN1,      R,      R,      0, ND);
e                 385 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,1,      R,     R,     R,    EMC,   GPIO,   CAN1,      R,      R,      0, ND);
e                 386 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,2,ADCTRIG,  CAN0,     R,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 387 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,3,      R,  CAN0,ADCTRIG,   EMC,   GPIO,      R,      R,      R,      0, ND);
e                 388 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,4,      R,   NMI,     R,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 389 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,5,      R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 390 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,6,      R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 391 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,7,      R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 392 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,8,      R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 393 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,9,      R,  CTIN, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 394 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,10,     R,  CTIN, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 395 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,11,     R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 396 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,12,     R, CTOUT, UART1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 397 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,13,     R, CTOUT,  I2C1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 398 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,14,     R,     R,     R,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 399 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(e,15,     R, CTOUT,  I2C1,    EMC,   GPIO,      R,      R,      R,      0, ND);
e                 594 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,0),
e                 595 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,1),
e                 596 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,2),
e                 597 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,3),
e                 598 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,4),
e                 599 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,5),
e                 600 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,6),
e                 601 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,7),
e                 602 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,8),
e                 603 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,9),
e                 604 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,10),
e                 605 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,11),
e                 606 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,12),
e                 607 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,13),
e                 608 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,14),
e                 609 drivers/pinctrl/pinctrl-lpc18xx.c 	LPC18XX_PIN_P(e,15),
e                 152 drivers/pinctrl/uniphier/pinctrl-uniphier.h #define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g)			\
e                 156 drivers/pinctrl/uniphier/pinctrl-uniphier.h 	.drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g),	\
e                 258 drivers/platform/chrome/cros_ec_chardev.c 	struct ec_event *event, *e;
e                 263 drivers/platform/chrome/cros_ec_chardev.c 	list_for_each_entry_safe(event, e, &priv->events, node) {
e                  61 drivers/pnp/isapnp/proc.c 	struct proc_dir_entry *de, *e;
e                  71 drivers/pnp/isapnp/proc.c 	e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
e                  73 drivers/pnp/isapnp/proc.c 	if (!e)
e                  75 drivers/pnp/isapnp/proc.c 	proc_set_size(e, 256);
e                  89 drivers/ras/cec.c #define PFN(e)			((e) >> PAGE_SHIFT)
e                  90 drivers/ras/cec.c #define DECAY(e)		(((e) >> COUNT_BITS) & DECAY_MASK)
e                  91 drivers/ras/cec.c #define COUNT(e)		((unsigned int)(e) & COUNT_MASK)
e                  92 drivers/ras/cec.c #define FULL_COUNT(e)		((e) & (PAGE_SIZE - 1))
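
The RAS CEC packs one error record per u64: the PFN in the bits above PAGE_SHIFT, and a (decay, count) pair in the low PAGE_SHIFT bits, which the macros above extract. A pack/unpack sketch; the field widths below are assumptions for illustration, not the driver's exact constants:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define DECAY_BITS 2                          /* assumed width */
    #define COUNT_BITS (PAGE_SHIFT - DECAY_BITS)
    #define COUNT_MASK ((1ULL << COUNT_BITS) - 1)
    #define DECAY_MASK ((1ULL << DECAY_BITS) - 1)

    static inline uint64_t pfn_of(uint64_t e)   { return e >> PAGE_SHIFT; }
    static inline unsigned decay_of(uint64_t e) { return (e >> COUNT_BITS) & DECAY_MASK; }
    static inline unsigned count_of(uint64_t e) { return e & COUNT_MASK; }

    static inline uint64_t pack(uint64_t pfn, unsigned decay, unsigned count)
    {
            return (pfn << PAGE_SHIFT) |
                   ((uint64_t)(decay & DECAY_MASK) << COUNT_BITS) |
                   ((uint64_t)count & COUNT_MASK);
    }
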
e                 174 drivers/rpmsg/qcom_smd.c #define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)
e                 422 drivers/s390/cio/device_fsm.c static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
e                 424 drivers/s390/cio/device_fsm.c 	switch (e) {
e                  98 drivers/s390/net/fsm.c 		int e = fi->history[idx].event;
e                 101 drivers/s390/net/fsm.c 		if (e == -1)
e                 107 drivers/s390/net/fsm.c 			       fi->f->event_names[e]);
e                 186 drivers/s390/net/ism.h 	u32 e : 1;	/* event bit */
e                 427 drivers/s390/net/ism_drv.c 	if (ism->sba->e) {
e                 428 drivers/s390/net/ism_drv.c 		ism->sba->e = 0;
e                3450 drivers/s390/net/qeth_core_main.c 		int e = 0;
e                3452 drivers/s390/net/qeth_core_main.c 		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
e                3453 drivers/s390/net/qeth_core_main.c 		       buffer->element[e].addr) {
e                3456 drivers/s390/net/qeth_core_main.c 			phys_aob_addr = (unsigned long) buffer->element[e].addr;
e                3458 drivers/s390/net/qeth_core_main.c 			++e;
e                1610 drivers/s390/net/qeth_l3_main.c 	int e;
e                1637 drivers/s390/net/qeth_l3_main.c 	for (e = 0; e < qdata->no_entries; ++e) {
e                 158 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			      const struct l2t_entry *e)
e                 174 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
e                 183 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		csk->mss_idx, e->idx, e->smt_idx);
e                 204 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				struct l2t_entry *e)
e                 323 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 			       struct l2t_entry *e)
e                2361 drivers/scsi/gdth.c     gdth_evt_str *e;
e                2376 drivers/scsi/gdth.c         e = &ebuffer[elastidx];
e                2377 drivers/scsi/gdth.c 	e->last_stamp = (u32)ktime_get_real_seconds();
e                2378 drivers/scsi/gdth.c         ++e->same_count;
e                2390 drivers/scsi/gdth.c         e = &ebuffer[elastidx];
e                2391 drivers/scsi/gdth.c         e->event_source = source;
e                2392 drivers/scsi/gdth.c         e->event_idx = idx;
e                2393 drivers/scsi/gdth.c 	e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
e                2394 drivers/scsi/gdth.c         e->same_count = 1;
e                2395 drivers/scsi/gdth.c         e->event_data = *evt;
e                2396 drivers/scsi/gdth.c         e->application = 0;
e                2398 drivers/scsi/gdth.c     return e;
e                2403 drivers/scsi/gdth.c     gdth_evt_str *e;
e                2419 drivers/scsi/gdth.c     e = &ebuffer[eindex];
e                2420 drivers/scsi/gdth.c     if (e->event_source != 0) {
e                2427 drivers/scsi/gdth.c         memcpy(estr, e, sizeof(gdth_evt_str));
e                2436 drivers/scsi/gdth.c     gdth_evt_str *e;
e                2445 drivers/scsi/gdth.c         e = &ebuffer[eindex];
e                2446 drivers/scsi/gdth.c         if (e->event_source == 0)
e                2448 drivers/scsi/gdth.c         if ((e->application & application) == 0) {
e                2449 drivers/scsi/gdth.c             e->application |= application;
e                2459 drivers/scsi/gdth.c         memcpy(estr, e, sizeof(gdth_evt_str));
e                 105 drivers/scsi/isci/request.c static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
e                 107 drivers/scsi/isci/request.c 	e->length = sg_dma_len(sg);
e                 108 drivers/scsi/isci/request.c 	e->address_upper = upper_32_bits(sg_dma_address(sg));
e                 109 drivers/scsi/isci/request.c 	e->address_lower = lower_32_bits(sg_dma_address(sg));
e                 110 drivers/scsi/isci/request.c 	e->address_modifier = 0;
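
init_sgl_element shows the usual 64-bit DMA address split: hardware scatter-gather descriptors store the address as two 32-bit halves via upper_32_bits()/lower_32_bits(). A self-contained equivalent:

    #include <stdint.h>

    struct sgl_element {
            uint32_t length;
            uint32_t address_upper;
            uint32_t address_lower;
    };

    static void sgl_set(struct sgl_element *e, uint64_t dma_addr, uint32_t len)
    {
            e->length        = len;
            e->address_upper = (uint32_t)(dma_addr >> 32); /* upper_32_bits() */
            e->address_lower = (uint32_t)dma_addr;         /* lower_32_bits() */
    }
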
e                4221 drivers/scsi/pm8001/pm8001_hwi.c 		buf_prd->im_len.e = 0;
e                4966 drivers/scsi/pm8001/pm8001_hwi.c 	flash_update_info.sgl.im_len.e = 0;
e                 287 drivers/scsi/pm8001/pm8001_sas.h 	__le32			e;
e                  80 drivers/scsi/qla2xxx/qla_gbl.h int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
e                 506 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                 527 drivers/scsi/qla2xxx/qla_gs.c 		e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
e                 528 drivers/scsi/qla2xxx/qla_gs.c 		if (!e)
e                 532 drivers/scsi/qla2xxx/qla_gs.c 		e->u.iosb.sp = sp;
e                 533 drivers/scsi/qla2xxx/qla_gs.c 		qla2x00_post_work(vha, e);
e                 538 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
e                 540 drivers/scsi/qla2xxx/qla_gs.c 	if (!e) {
e                 563 drivers/scsi/qla2xxx/qla_gs.c 	e->u.iosb.sp = sp;
e                 564 drivers/scsi/qla2xxx/qla_gs.c 	qla2x00_post_work(vha, e);
e                2954 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                2956 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
e                2957 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                2960 drivers/scsi/qla2xxx/qla_gs.c 	e->u.fcport.fcport = fcport;
e                2962 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                3103 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                3109 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
e                3110 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                3113 drivers/scsi/qla2xxx/qla_gs.c 	e->u.gpnid.id = *id;
e                3114 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                3257 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                3296 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
e                3297 drivers/scsi/qla2xxx/qla_gs.c 	if (!e) {
e                3315 drivers/scsi/qla2xxx/qla_gs.c 	e->u.iosb.sp = sp;
e                3316 drivers/scsi/qla2xxx/qla_gs.c 	qla2x00_post_work(vha, e);
e                3729 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                3734 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, cmd);
e                3735 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                3738 drivers/scsi/qla2xxx/qla_gs.c 	e->u.iosb.sp = sp;
e                3740 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                3746 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                3751 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, cmd);
e                3752 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                3755 drivers/scsi/qla2xxx/qla_gs.c 	e->u.gpnft.fc4_type = FC4_TYPE_NVME;
e                3756 drivers/scsi/qla2xxx/qla_gs.c 	e->u.gpnft.sp = sp;
e                3758 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                4323 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                4331 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
e                4332 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                4335 drivers/scsi/qla2xxx/qla_gs.c 	e->u.fcport.fcport = fcport;
e                4336 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                4452 drivers/scsi/qla2xxx/qla_gs.c 	struct qla_work_evt *e;
e                4460 drivers/scsi/qla2xxx/qla_gs.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
e                4461 drivers/scsi/qla2xxx/qla_gs.c 	if (!e)
e                4464 drivers/scsi/qla2xxx/qla_gs.c 	e->u.fcport.fcport = fcport;
e                4465 drivers/scsi/qla2xxx/qla_gs.c 	return qla2x00_post_work(vha, e);
e                 529 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                 531 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
e                 532 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                 535 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.fcport = fcport;
e                 538 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                 684 drivers/scsi/qla2xxx/qla_init.c 	struct get_name_list_extended *e;
e                 731 drivers/scsi/qla2xxx/qla_init.c 		e = &vha->gnl.l[i];
e                 732 drivers/scsi/qla2xxx/qla_init.c 		wwn = wwn_to_u64(e->port_name);
e                 733 drivers/scsi/qla2xxx/qla_init.c 		id.b.domain = e->port_id[2];
e                 734 drivers/scsi/qla2xxx/qla_init.c 		id.b.area = e->port_id[1];
e                 735 drivers/scsi/qla2xxx/qla_init.c 		id.b.al_pa = e->port_id[0];
e                 746 drivers/scsi/qla2xxx/qla_init.c 		loop_id = le16_to_cpu(e->nport_handle);
e                 748 drivers/scsi/qla2xxx/qla_init.c 		nvme_cls = e->current_login_state >> 4;
e                 749 drivers/scsi/qla2xxx/qla_init.c 		current_login_state = e->current_login_state & 0xf;
e                 764 drivers/scsi/qla2xxx/qla_init.c 		    e->current_login_state, fcport->fw_login_state,
e                 816 drivers/scsi/qla2xxx/qla_init.c 				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
e                 852 drivers/scsi/qla2xxx/qla_init.c 				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
e                 894 drivers/scsi/qla2xxx/qla_init.c 				e = &vha->gnl.l[i];
e                 895 drivers/scsi/qla2xxx/qla_init.c 				id.b.domain = e->port_id[0];
e                 896 drivers/scsi/qla2xxx/qla_init.c 				id.b.area = e->port_id[1];
e                 897 drivers/scsi/qla2xxx/qla_init.c 				id.b.al_pa = e->port_id[2];
e                 899 drivers/scsi/qla2xxx/qla_init.c 				loop_id = le16_to_cpu(e->nport_handle);
e                 904 drivers/scsi/qla2xxx/qla_init.c 						e->port_name, 0);
e                 972 drivers/scsi/qla2xxx/qla_init.c 	struct get_name_list_extended *e;
e                 998 drivers/scsi/qla2xxx/qla_init.c 		e = &vha->gnl.l[i];
e                 999 drivers/scsi/qla2xxx/qla_init.c 		loop_id = le16_to_cpu(e->nport_handle);
e                1003 drivers/scsi/qla2xxx/qla_init.c 		wwn = wwn_to_u64(e->port_name);
e                1007 drivers/scsi/qla2xxx/qla_init.c 		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e                1008 drivers/scsi/qla2xxx/qla_init.c 		    e->port_id[0], e->current_login_state, e->last_login_state,
e                1035 drivers/scsi/qla2xxx/qla_init.c 		e = &vha->gnl.l[i];
e                1036 drivers/scsi/qla2xxx/qla_init.c 		wwn = wwn_to_u64(e->port_name);
e                1047 drivers/scsi/qla2xxx/qla_init.c 		id.b.domain = e->port_id[2];
e                1048 drivers/scsi/qla2xxx/qla_init.c 		id.b.area = e->port_id[1];
e                1049 drivers/scsi/qla2xxx/qla_init.c 		id.b.al_pa = e->port_id[0];
e                1056 drivers/scsi/qla2xxx/qla_init.c 			wwnn = wwn_to_u64(e->node_name);
e                1141 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                1143 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
e                1144 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                1147 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.fcport = fcport;
e                1149 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                1184 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                1186 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
e                1187 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                1190 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.fcport = fcport;
e                1192 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                1285 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                1287 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
e                1288 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                1291 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.fcport = fcport;
e                1292 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.opt = opt;
e                1294 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                1677 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                1679 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
e                1680 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                1683 drivers/scsi/qla2xxx/qla_init.c 	e->u.new_sess.id = *id;
e                1684 drivers/scsi/qla2xxx/qla_init.c 	e->u.new_sess.pla = pla;
e                1685 drivers/scsi/qla2xxx/qla_init.c 	e->u.new_sess.fc4_type = fc4_type;
e                1686 drivers/scsi/qla2xxx/qla_init.c 	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
e                1688 drivers/scsi/qla2xxx/qla_init.c 		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
e                1690 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                5313 drivers/scsi/qla2xxx/qla_init.c 	struct qla_work_evt *e;
e                5315 drivers/scsi/qla2xxx/qla_init.c 	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
e                5316 drivers/scsi/qla2xxx/qla_init.c 	if (!e)
e                5319 drivers/scsi/qla2xxx/qla_init.c 	e->u.fcport.fcport = fcport;
e                5320 drivers/scsi/qla2xxx/qla_init.c 	return qla2x00_post_work(vha, e);
e                2751 drivers/scsi/qla2xxx/qla_iocb.c 	struct qla_work_evt *e;
e                2773 drivers/scsi/qla2xxx/qla_iocb.c 		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
e                2774 drivers/scsi/qla2xxx/qla_iocb.c 		if (!e) {
e                2781 drivers/scsi/qla2xxx/qla_iocb.c 		e->u.iosb.sp = sp;
e                2782 drivers/scsi/qla2xxx/qla_iocb.c 		qla2x00_post_work(vha, e);
e                  26 drivers/scsi/qla2xxx/qla_mbx.c 	struct mb_cmd_name *e;
e                  29 drivers/scsi/qla2xxx/qla_mbx.c 		e = mb_str + i;
e                  30 drivers/scsi/qla2xxx/qla_mbx.c 		if (cmd == e->cmd)
e                  31 drivers/scsi/qla2xxx/qla_mbx.c 			return e->str;
e                4857 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                4867 drivers/scsi/qla2xxx/qla_os.c 	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
e                4868 drivers/scsi/qla2xxx/qla_os.c 	if (!e) {
e                4873 drivers/scsi/qla2xxx/qla_os.c 	INIT_LIST_HEAD(&e->list);
e                4874 drivers/scsi/qla2xxx/qla_os.c 	e->type = type;
e                4875 drivers/scsi/qla2xxx/qla_os.c 	e->flags = QLA_EVT_FLAG_FREE;
e                4876 drivers/scsi/qla2xxx/qla_os.c 	return e;
e                4880 drivers/scsi/qla2xxx/qla_os.c qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
e                4886 drivers/scsi/qla2xxx/qla_os.c 	list_add_tail(&e->list, &vha->work_list);
e                4903 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                4905 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
e                4906 drivers/scsi/qla2xxx/qla_os.c 	if (!e)
e                4909 drivers/scsi/qla2xxx/qla_os.c 	e->u.aen.code = code;
e                4910 drivers/scsi/qla2xxx/qla_os.c 	e->u.aen.data = data;
e                4911 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);
e                4917 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                4919 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
e                4920 drivers/scsi/qla2xxx/qla_os.c 	if (!e)
e                4923 drivers/scsi/qla2xxx/qla_os.c 	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
e                4924 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);
e                4932 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;			\
e                4934 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, type);	\
e                4935 drivers/scsi/qla2xxx/qla_os.c 	if (!e)					\
e                4938 drivers/scsi/qla2xxx/qla_os.c 	e->u.logio.fcport = fcport;		\
e                4940 drivers/scsi/qla2xxx/qla_os.c 		e->u.logio.data[0] = data[0];	\
e                4941 drivers/scsi/qla2xxx/qla_os.c 		e->u.logio.data[1] = data[1];	\
e                4944 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);	\
e                4957 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                4959 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
e                4960 drivers/scsi/qla2xxx/qla_os.c 	if (!e)
e                4963 drivers/scsi/qla2xxx/qla_os.c 	e->u.uevent.code = code;
e                4964 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);
e                4989 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                4991 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
e                4992 drivers/scsi/qla2xxx/qla_os.c 	if (!e)
e                4995 drivers/scsi/qla2xxx/qla_os.c 	e->u.aenfx.evtcode = evtcode;
e                4996 drivers/scsi/qla2xxx/qla_os.c 	e->u.aenfx.count = cnt;
e                4997 drivers/scsi/qla2xxx/qla_os.c 	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
e                4998 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);
e                5023 drivers/scsi/qla2xxx/qla_os.c void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
e                5028 drivers/scsi/qla2xxx/qla_os.c 	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
e                5033 drivers/scsi/qla2xxx/qla_os.c 	    __func__, __LINE__, e->u.new_sess.port_name);
e                5036 drivers/scsi/qla2xxx/qla_os.c 	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
e                5038 drivers/scsi/qla2xxx/qla_os.c 		fcport->d_id = e->u.new_sess.id;
e                5054 drivers/scsi/qla2xxx/qla_os.c 			fcport->d_id = e->u.new_sess.id;
e                5057 drivers/scsi/qla2xxx/qla_os.c 			if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
e                5060 drivers/scsi/qla2xxx/qla_os.c 			if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
e                5065 drivers/scsi/qla2xxx/qla_os.c 			memcpy(fcport->port_name, e->u.new_sess.port_name,
e                5068 drivers/scsi/qla2xxx/qla_os.c 			if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
e                5074 drivers/scsi/qla2xxx/qla_os.c 				   __func__, e->u.new_sess.port_name);
e                5086 drivers/scsi/qla2xxx/qla_os.c 		    e->u.new_sess.port_name, 1);
e                5111 drivers/scsi/qla2xxx/qla_os.c 		memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
e                5141 drivers/scsi/qla2xxx/qla_os.c 			    &e->u.new_sess.id, 1);
e                5195 drivers/scsi/qla2xxx/qla_os.c static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
e                5197 drivers/scsi/qla2xxx/qla_os.c 	struct srb *sp = e->u.iosb.sp;
e                5212 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e, *tmp;
e                5221 drivers/scsi/qla2xxx/qla_os.c 	list_for_each_entry_safe(e, tmp, &work, list) {
e                5223 drivers/scsi/qla2xxx/qla_os.c 		switch (e->type) {
e                5226 drivers/scsi/qla2xxx/qla_os.c 			    e->u.aen.code, e->u.aen.data);
e                5229 drivers/scsi/qla2xxx/qla_os.c 			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
e                5232 drivers/scsi/qla2xxx/qla_os.c 			qla2x00_async_login(vha, e->u.logio.fcport,
e                5233 drivers/scsi/qla2xxx/qla_os.c 			    e->u.logio.data);
e                5236 drivers/scsi/qla2xxx/qla_os.c 			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
e                5239 drivers/scsi/qla2xxx/qla_os.c 			qla2x00_async_logout_done(vha, e->u.logio.fcport,
e                5240 drivers/scsi/qla2xxx/qla_os.c 			    e->u.logio.data);
e                5243 drivers/scsi/qla2xxx/qla_os.c 			qla2x00_async_adisc(vha, e->u.logio.fcport,
e                5244 drivers/scsi/qla2xxx/qla_os.c 			    e->u.logio.data);
e                5247 drivers/scsi/qla2xxx/qla_os.c 			qla2x00_uevent_emit(vha, e->u.uevent.code);
e                5250 drivers/scsi/qla2xxx/qla_os.c 			qlafx00_process_aen(vha, e);
e                5253 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
e                5256 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_sp_unmap(vha, e->u.iosb.sp);
e                5262 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_create_new_sess(vha, e);
e                5265 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
e                5266 drivers/scsi/qla2xxx/qla_os.c 			    e->u.fcport.opt);
e                5269 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_prli(vha, e->u.fcport.fcport);
e                5272 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
e                5275 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gnl(vha, e->u.fcport.fcport);
e                5278 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_do_nack_work(vha, e);
e                5281 drivers/scsi/qla2xxx/qla_os.c 			rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
e                5284 drivers/scsi/qla2xxx/qla_os.c 			qla2x00_async_prlo_done(vha, e->u.logio.fcport,
e                5285 drivers/scsi/qla2xxx/qla_os.c 			    e->u.logio.data);
e                5288 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
e                5289 drivers/scsi/qla2xxx/qla_os.c 			    e->u.gpnft.sp);
e                5292 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
e                5295 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
e                5298 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gnnid(vha, e->u.fcport.fcport);
e                5301 drivers/scsi/qla2xxx/qla_os.c 			qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
e                5304 drivers/scsi/qla2xxx/qla_os.c 			qla_sp_retry(vha, e);
e                5307 drivers/scsi/qla2xxx/qla_os.c 			qla_do_iidma_work(vha, e->u.fcport.fcport);
e                5311 drivers/scsi/qla2xxx/qla_os.c 			    e->u.fcport.fcport, false);
e                5322 drivers/scsi/qla2xxx/qla_os.c 		list_del_init(&e->list);
e                5323 drivers/scsi/qla2xxx/qla_os.c 		if (e->flags & QLA_EVT_FLAG_FREE)
e                5324 drivers/scsi/qla2xxx/qla_os.c 			kfree(e);
e                5333 drivers/scsi/qla2xxx/qla_os.c 	struct qla_work_evt *e;
e                5335 drivers/scsi/qla2xxx/qla_os.c 	e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
e                5337 drivers/scsi/qla2xxx/qla_os.c 	if (!e) {
e                5342 drivers/scsi/qla2xxx/qla_os.c 	return qla2x00_post_work(vha, e);
e                 550 drivers/scsi/qla2xxx/qla_target.c 	struct qla_work_evt *e;
e                 552 drivers/scsi/qla2xxx/qla_target.c 	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
e                 553 drivers/scsi/qla2xxx/qla_target.c 	if (!e)
e                 556 drivers/scsi/qla2xxx/qla_target.c 	e->u.nack.fcport = fcport;
e                 557 drivers/scsi/qla2xxx/qla_target.c 	e->u.nack.type = type;
e                 558 drivers/scsi/qla2xxx/qla_target.c 	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
e                 559 drivers/scsi/qla2xxx/qla_target.c 	return qla2x00_post_work(vha, e);
e                 670 drivers/scsi/qla2xxx/qla_target.c void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
e                 674 drivers/scsi/qla2xxx/qla_target.c 	switch (e->u.nack.type) {
e                 676 drivers/scsi/qla2xxx/qla_target.c 		t = e->u.nack.fcport;
e                 680 drivers/scsi/qla2xxx/qla_target.c 		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
e                 686 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
e                 690 drivers/scsi/qla2xxx/qla_target.c 	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
e                 691 drivers/scsi/qla2xxx/qla_target.c 	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
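
The qla2xxx hits above are all one pattern: allocate a typed work event (qla2x00_alloc_work), fill the union payload (e->u.fcport, e->u.iosb, e->u.nack, ...), queue it with qla2x00_post_work(), and drain the list later in qla2x00_do_work() with a switch on e->type, freeing entries flagged QLA_EVT_FLAG_FREE; the qla4xxx hits that follow reuse the same shape. A stripped-down sketch of that tagged-union work queue (types simplified; a plain next pointer stands in for list_head):

    #include <stdlib.h>

    enum evt_type { EVT_AEN, EVT_LOGIN }; /* illustrative event types */

    struct work_evt {
            struct work_evt *next;
            enum evt_type type;
            union {
                    struct { int code; } aen;
                    struct { void *port; } login;
            } u;
    };

    static struct work_evt *alloc_work(enum evt_type type)
    {
            struct work_evt *e = calloc(1, sizeof(*e));

            if (e)
                    e->type = type;
            return e;
    }

    static void do_work(struct work_evt *list)
    {
            struct work_evt *e, *tmp;

            for (e = list; e; e = tmp) {
                    tmp = e->next; /* safe: e is freed below */
                    switch (e->type) {
                    case EVT_AEN:   /* handle e->u.aen.code */   break;
                    case EVT_LOGIN: /* handle e->u.login.port */ break;
                    }
                    free(e);
            }
    }
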
e                5171 drivers/scsi/qla4xxx/ql4_os.c 	struct qla4_work_evt *e;
e                5174 drivers/scsi/qla4xxx/ql4_os.c 	e = kzalloc(size, GFP_ATOMIC);
e                5175 drivers/scsi/qla4xxx/ql4_os.c 	if (!e)
e                5178 drivers/scsi/qla4xxx/ql4_os.c 	INIT_LIST_HEAD(&e->list);
e                5179 drivers/scsi/qla4xxx/ql4_os.c 	e->type = type;
e                5180 drivers/scsi/qla4xxx/ql4_os.c 	return e;
e                5184 drivers/scsi/qla4xxx/ql4_os.c 			     struct qla4_work_evt *e)
e                5189 drivers/scsi/qla4xxx/ql4_os.c 	list_add_tail(&e->list, &ha->work_list);
e                5198 drivers/scsi/qla4xxx/ql4_os.c 	struct qla4_work_evt *e;
e                5200 drivers/scsi/qla4xxx/ql4_os.c 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
e                5201 drivers/scsi/qla4xxx/ql4_os.c 	if (!e)
e                5204 drivers/scsi/qla4xxx/ql4_os.c 	e->u.aen.code = aen_code;
e                5205 drivers/scsi/qla4xxx/ql4_os.c 	e->u.aen.data_size = data_size;
e                5206 drivers/scsi/qla4xxx/ql4_os.c 	memcpy(e->u.aen.data, data, data_size);
e                5208 drivers/scsi/qla4xxx/ql4_os.c 	qla4xxx_post_work(ha, e);
e                5217 drivers/scsi/qla4xxx/ql4_os.c 	struct qla4_work_evt *e;
e                5219 drivers/scsi/qla4xxx/ql4_os.c 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
e                5220 drivers/scsi/qla4xxx/ql4_os.c 	if (!e)
e                5223 drivers/scsi/qla4xxx/ql4_os.c 	e->u.ping.status = status;
e                5224 drivers/scsi/qla4xxx/ql4_os.c 	e->u.ping.pid = pid;
e                5225 drivers/scsi/qla4xxx/ql4_os.c 	e->u.ping.data_size = data_size;
e                5226 drivers/scsi/qla4xxx/ql4_os.c 	memcpy(e->u.ping.data, data, data_size);
e                5228 drivers/scsi/qla4xxx/ql4_os.c 	qla4xxx_post_work(ha, e);
e                5235 drivers/scsi/qla4xxx/ql4_os.c 	struct qla4_work_evt *e, *tmp;
e                5243 drivers/scsi/qla4xxx/ql4_os.c 	list_for_each_entry_safe(e, tmp, &work, list) {
e                5244 drivers/scsi/qla4xxx/ql4_os.c 		list_del_init(&e->list);
e                5246 drivers/scsi/qla4xxx/ql4_os.c 		switch (e->type) {
e                5250 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.aen.code,
e                5251 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.aen.data_size,
e                5252 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.aen.data);
e                5257 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.ping.status,
e                5258 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.ping.pid,
e                5259 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.ping.data_size,
e                5260 drivers/scsi/qla4xxx/ql4_os.c 					      e->u.ping.data);
e                5264 drivers/scsi/qla4xxx/ql4_os.c 				   "supported", e->type);
e                5266 drivers/scsi/qla4xxx/ql4_os.c 		kfree(e);
e                 351 drivers/scsi/vmw_pvscsi.c 			      struct PVSCSIRingReqDesc *e)
e                 357 drivers/scsi/vmw_pvscsi.c 	e->dataLen = bufflen;
e                 358 drivers/scsi/vmw_pvscsi.c 	e->dataAddr = 0;
e                 374 drivers/scsi/vmw_pvscsi.c 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
e                 384 drivers/scsi/vmw_pvscsi.c 			e->dataAddr = ctx->sglPA;
e                 386 drivers/scsi/vmw_pvscsi.c 			e->dataAddr = sg_dma_address(sg);
e                 399 drivers/scsi/vmw_pvscsi.c 		e->dataAddr = ctx->dataPA;
e                 535 drivers/scsi/vmw_pvscsi.c 				    const struct PVSCSIRingCmpDesc *e)
e                 540 drivers/scsi/vmw_pvscsi.c 	u32 btstat = e->hostStatus;
e                 541 drivers/scsi/vmw_pvscsi.c 	u32 sdstat = e->scsiStatus;
e                 543 drivers/scsi/vmw_pvscsi.c 	ctx = pvscsi_get_context(adapter, e->context);
e                 584 drivers/scsi/vmw_pvscsi.c 			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
e                 652 drivers/scsi/vmw_pvscsi.c 		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
e                 661 drivers/scsi/vmw_pvscsi.c 		pvscsi_complete_request(adapter, e);
e                 680 drivers/scsi/vmw_pvscsi.c 	struct PVSCSIRingReqDesc *e;
e                 703 drivers/scsi/vmw_pvscsi.c 	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
e                 705 drivers/scsi/vmw_pvscsi.c 	e->bus    = sdev->channel;
e                 706 drivers/scsi/vmw_pvscsi.c 	e->target = sdev->id;
e                 707 drivers/scsi/vmw_pvscsi.c 	memset(e->lun, 0, sizeof(e->lun));
e                 708 drivers/scsi/vmw_pvscsi.c 	e->lun[1] = sdev->lun;
e                 720 drivers/scsi/vmw_pvscsi.c 		e->senseAddr = ctx->sensePA;
e                 721 drivers/scsi/vmw_pvscsi.c 		e->senseLen = SCSI_SENSE_BUFFERSIZE;
e                 723 drivers/scsi/vmw_pvscsi.c 		e->senseLen  = 0;
e                 724 drivers/scsi/vmw_pvscsi.c 		e->senseAddr = 0;
e                 726 drivers/scsi/vmw_pvscsi.c 	e->cdbLen   = cmd->cmd_len;
e                 727 drivers/scsi/vmw_pvscsi.c 	e->vcpuHint = smp_processor_id();
e                 728 drivers/scsi/vmw_pvscsi.c 	memcpy(e->cdb, cmd->cmnd, e->cdbLen);
e                 730 drivers/scsi/vmw_pvscsi.c 	e->tag = SIMPLE_QUEUE_TAG;
e                 733 drivers/scsi/vmw_pvscsi.c 		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
e                 735 drivers/scsi/vmw_pvscsi.c 		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
e                 737 drivers/scsi/vmw_pvscsi.c 		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
e                 739 drivers/scsi/vmw_pvscsi.c 		e->flags = 0;
e                 741 drivers/scsi/vmw_pvscsi.c 	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
e                 751 drivers/scsi/vmw_pvscsi.c 	e->context = pvscsi_map_context(adapter, ctx);
e                1020 drivers/scsi/vmw_pvscsi.c 			       const struct PVSCSIRingMsgDesc *e)
e                1027 drivers/scsi/vmw_pvscsi.c 	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
e                1031 drivers/scsi/vmw_pvscsi.c 	if (e->type == PVSCSI_MSG_DEV_ADDED) {
e                1033 drivers/scsi/vmw_pvscsi.c 		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
e                1052 drivers/scsi/vmw_pvscsi.c 	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
e                1054 drivers/scsi/vmw_pvscsi.c 		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
e                1091 drivers/scsi/vmw_pvscsi.c 		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
e                1095 drivers/scsi/vmw_pvscsi.c 		pvscsi_process_msg(adapter, e);
e                 287 drivers/soc/fsl/qbman/bman.c static int rcr_ptr2idx(struct bm_rcr_entry *e)
e                 289 drivers/soc/fsl/qbman/bman.c 	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
e                 405 drivers/soc/fsl/qbman/qman.c static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
e                 407 drivers/soc/fsl/qbman/qman.c 	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
e                 590 drivers/soc/fsl/qbman/qman.c static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
e                 592 drivers/soc/fsl/qbman/qman.c 	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
e                 595 drivers/soc/fsl/qbman/qman.c static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
e                 597 drivers/soc/fsl/qbman/qman.c 	return dqrr_carryclear(e + 1);
e                 757 drivers/soc/fsl/qbman/qman.c static inline int mr_ptr2idx(const union qm_mr_entry *e)
e                 759 drivers/soc/fsl/qbman/qman.c 	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
e                 762 drivers/soc/fsl/qbman/qman.c static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
e                 764 drivers/soc/fsl/qbman/qman.c 	return mr_carryclear(e + 1);
e                1326 drivers/soc/fsl/qbman/qman.c 		const union qm_mr_entry *e = qm_mr_current(p);
e                1329 drivers/soc/fsl/qbman/qman.c 			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
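
The bman/qman ptr2idx helpers recover a ring index from an entry pointer: entries are power-of-two sized and the ring base is aligned to the ring's total size, so shifting the address right by the entry-size bits and masking with SIZE - 1 yields the slot number, and the same mask wraps (carry-clears) when stepping past the last entry. A sketch under those alignment assumptions (sizes illustrative):

    #include <stdint.h>

    #define ENTRY_SHIFT 6 /* 64-byte entries */
    #define RING_SIZE   8 /* power of two */

    struct entry { char pad[1 << ENTRY_SHIFT]; };

    static int ptr2idx(const struct entry *e)
    {
            return ((uintptr_t)e >> ENTRY_SHIFT) & (RING_SIZE - 1);
    }

    /* Advance one slot; the mask wraps at the ring end, assuming the ring
     * base is aligned to RING_SIZE * sizeof(struct entry). */
    static const struct entry *inc(const struct entry *ring,
                                   const struct entry *e)
    {
            return &ring[ptr2idx(e + 1)];
    }
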
e                 290 drivers/soc/qcom/smem.c 	struct smem_private_entry *e;
e                 292 drivers/soc/qcom/smem.c 	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
e                 312 drivers/soc/qcom/smem.c uncached_entry_next(struct smem_private_entry *e)
e                 314 drivers/soc/qcom/smem.c 	void *p = e;
e                 316 drivers/soc/qcom/smem.c 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
e                 317 drivers/soc/qcom/smem.c 	       le32_to_cpu(e->size);
e                 321 drivers/soc/qcom/smem.c cached_entry_next(struct smem_private_entry *e, size_t cacheline)
e                 323 drivers/soc/qcom/smem.c 	void *p = e;
e                 325 drivers/soc/qcom/smem.c 	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
e                 328 drivers/soc/qcom/smem.c static void *uncached_entry_to_item(struct smem_private_entry *e)
e                 330 drivers/soc/qcom/smem.c 	void *p = e;
e                 332 drivers/soc/qcom/smem.c 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
e                 335 drivers/soc/qcom/smem.c static void *cached_entry_to_item(struct smem_private_entry *e)
e                 337 drivers/soc/qcom/smem.c 	void *p = e;
e                 339 drivers/soc/qcom/smem.c 	return p - le32_to_cpu(e->size);
e                 517 drivers/soc/qcom/smem.c 	struct smem_private_entry *e, *end;
e                 519 drivers/soc/qcom/smem.c 	e = phdr_to_first_uncached_entry(phdr);
e                 522 drivers/soc/qcom/smem.c 	while (e < end) {
e                 523 drivers/soc/qcom/smem.c 		if (e->canary != SMEM_PRIVATE_CANARY)
e                 526 drivers/soc/qcom/smem.c 		if (le16_to_cpu(e->item) == item) {
e                 528 drivers/soc/qcom/smem.c 				*size = le32_to_cpu(e->size) -
e                 529 drivers/soc/qcom/smem.c 					le16_to_cpu(e->padding_data);
e                 531 drivers/soc/qcom/smem.c 			return uncached_entry_to_item(e);
e                 534 drivers/soc/qcom/smem.c 		e = uncached_entry_next(e);
e                 539 drivers/soc/qcom/smem.c 	e = phdr_to_first_cached_entry(phdr, cacheline);
e                 542 drivers/soc/qcom/smem.c 	while (e > end) {
e                 543 drivers/soc/qcom/smem.c 		if (e->canary != SMEM_PRIVATE_CANARY)
e                 546 drivers/soc/qcom/smem.c 		if (le16_to_cpu(e->item) == item) {
e                 548 drivers/soc/qcom/smem.c 				*size = le32_to_cpu(e->size) -
e                 549 drivers/soc/qcom/smem.c 					le16_to_cpu(e->padding_data);
e                 551 drivers/soc/qcom/smem.c 			return cached_entry_to_item(e);
e                 554 drivers/soc/qcom/smem.c 		e = cached_entry_next(e, cacheline);
e                  17 drivers/soc/qcom/trace-rpmh.h 	TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
e                  19 drivers/soc/qcom/trace-rpmh.h 	TP_ARGS(d, m, r, e),
e                  34 drivers/soc/qcom/trace-rpmh.h 		       __entry->err = e;
e                  77 drivers/ssb/driver_chipcommon_pmu.c 	const struct pmu0_plltab_entry *e;
e                  81 drivers/ssb/driver_chipcommon_pmu.c 		e = &pmu0_plltab[i];
e                  82 drivers/ssb/driver_chipcommon_pmu.c 		if (e->freq == crystalfreq)
e                  83 drivers/ssb/driver_chipcommon_pmu.c 			return e;
e                  94 drivers/ssb/driver_chipcommon_pmu.c 	const struct pmu0_plltab_entry *e = NULL;
e                  99 drivers/ssb/driver_chipcommon_pmu.c 		e = pmu0_plltab_find_entry(crystalfreq);
e                 100 drivers/ssb/driver_chipcommon_pmu.c 	if (!e)
e                 101 drivers/ssb/driver_chipcommon_pmu.c 		e = pmu0_plltab_find_entry(SSB_PMU0_DEFAULT_XTALFREQ);
e                 102 drivers/ssb/driver_chipcommon_pmu.c 	BUG_ON(!e);
e                 103 drivers/ssb/driver_chipcommon_pmu.c 	crystalfreq = e->freq;
e                 104 drivers/ssb/driver_chipcommon_pmu.c 	cc->pmu.crystalfreq = e->freq;
e                 108 drivers/ssb/driver_chipcommon_pmu.c 	if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
e                 155 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->wb_int << SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_IMSK;
e                 156 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->wb_frac << SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_FMSK;
e                 157 drivers/ssb/driver_chipcommon_pmu.c 	if (e->wb_frac == 0)
e                 164 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= (((u32)e->wb_int >> 4) << SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT) & SSB_PMU0_PLLCTL2_WILD_IMSKHI;
e                 173 drivers/ssb/driver_chipcommon_pmu.c 	pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
e                 208 drivers/ssb/driver_chipcommon_pmu.c 	const struct pmu1_plltab_entry *e;
e                 212 drivers/ssb/driver_chipcommon_pmu.c 		e = &pmu1_plltab[i];
e                 213 drivers/ssb/driver_chipcommon_pmu.c 		if (e->freq == crystalfreq)
e                 214 drivers/ssb/driver_chipcommon_pmu.c 			return e;
e                 225 drivers/ssb/driver_chipcommon_pmu.c 	const struct pmu1_plltab_entry *e = NULL;
e                 238 drivers/ssb/driver_chipcommon_pmu.c 		e = pmu1_plltab_find_entry(crystalfreq);
e                 239 drivers/ssb/driver_chipcommon_pmu.c 	if (!e)
e                 240 drivers/ssb/driver_chipcommon_pmu.c 		e = pmu1_plltab_find_entry(SSB_PMU1_DEFAULT_XTALFREQ);
e                 241 drivers/ssb/driver_chipcommon_pmu.c 	BUG_ON(!e);
e                 242 drivers/ssb/driver_chipcommon_pmu.c 	crystalfreq = e->freq;
e                 243 drivers/ssb/driver_chipcommon_pmu.c 	cc->pmu.crystalfreq = e->freq;
e                 247 drivers/ssb/driver_chipcommon_pmu.c 	if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
e                 283 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->p1div << SSB_PMU1_PLLCTL0_P1DIV_SHIFT) & SSB_PMU1_PLLCTL0_P1DIV;
e                 284 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->p2div << SSB_PMU1_PLLCTL0_P2DIV_SHIFT) & SSB_PMU1_PLLCTL0_P2DIV;
e                 290 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->ndiv_int << SSB_PMU1_PLLCTL2_NDIVINT_SHIFT) & SSB_PMU1_PLLCTL2_NDIVINT;
e                 297 drivers/ssb/driver_chipcommon_pmu.c 	pllctl |= ((u32)e->ndiv_frac << SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT) & SSB_PMU1_PLLCTL3_NDIVFRAC;
e                 311 drivers/ssb/driver_chipcommon_pmu.c 	pmuctl |= ((((u32)e->freq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT)
e                 313 drivers/ssb/driver_chipcommon_pmu.c 	pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
e                 622 drivers/ssb/driver_chipcommon_pmu.c 	const struct pmu0_plltab_entry *e = NULL;
e                 626 drivers/ssb/driver_chipcommon_pmu.c 	e = pmu0_plltab_find_entry(crystalfreq);
e                 627 drivers/ssb/driver_chipcommon_pmu.c 	BUG_ON(!e);
e                 628 drivers/ssb/driver_chipcommon_pmu.c 	return e->freq * 1000;
e                  90 drivers/ssb/driver_chipcommon_sflash.c 	const struct ssb_sflash_tbl_e *e;
e                 107 drivers/ssb/driver_chipcommon_sflash.c 			for (e = ssb_sflash_sst_tbl; e->name; e++) {
e                 108 drivers/ssb/driver_chipcommon_sflash.c 				if (e->id == id2)
e                 115 drivers/ssb/driver_chipcommon_sflash.c 			for (e = ssb_sflash_st_tbl; e->name; e++) {
e                 116 drivers/ssb/driver_chipcommon_sflash.c 				if (e->id == id)
e                 121 drivers/ssb/driver_chipcommon_sflash.c 		if (!e->name) {
e                 132 drivers/ssb/driver_chipcommon_sflash.c 		for (e = ssb_sflash_at_tbl; e->name; e++) {
e                 133 drivers/ssb/driver_chipcommon_sflash.c 			if (e->id == id)
e                 136 drivers/ssb/driver_chipcommon_sflash.c 		if (!e->name) {
e                 149 drivers/ssb/driver_chipcommon_sflash.c 	sflash->blocksize = e->blocksize;
e                 150 drivers/ssb/driver_chipcommon_sflash.c 	sflash->numblocks = e->numblocks;
e                 155 drivers/ssb/driver_chipcommon_sflash.c 		e->name, sflash->size / 1024, e->blocksize, e->numblocks);
e                 557 drivers/staging/greybus/audio_topology.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 590 drivers/staging/greybus/audio_topology.c 	if (e->shift_l != e->shift_r)
e                 602 drivers/staging/greybus/audio_topology.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 616 drivers/staging/greybus/audio_topology.c 	if (ucontrol->value.enumerated.item[0] > e->max - 1)
e                 621 drivers/staging/greybus/audio_topology.c 	if (e->shift_l != e->shift_r) {
e                 622 drivers/staging/greybus/audio_topology.c 		if (ucontrol->value.enumerated.item[1] > e->max - 1)
e                 725 drivers/staging/greybus/audio_topology.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 754 drivers/staging/greybus/audio_topology.c 	if (e->shift_l != e->shift_r)
e                 773 drivers/staging/greybus/audio_topology.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 776 drivers/staging/greybus/audio_topology.c 	if (ucontrol->value.enumerated.item[0] > e->max - 1)
e                 806 drivers/staging/greybus/audio_topology.c 	val = mux << e->shift_l;
e                 807 drivers/staging/greybus/audio_topology.c 	mask = e->mask << e->shift_l;
e                 816 drivers/staging/greybus/audio_topology.c 	if (e->shift_l != e->shift_r) {
e                 817 drivers/staging/greybus/audio_topology.c 		if (ucontrol->value.enumerated.item[1] > e->max - 1)
e                 819 drivers/staging/greybus/audio_topology.c 		val |= ucontrol->value.enumerated.item[1] << e->shift_r;
e                 820 drivers/staging/greybus/audio_topology.c 		mask |= e->mask << e->shift_r;
e                 849 drivers/staging/greybus/audio_topology.c 			snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
e                1108 drivers/staging/isdn/gigaset/ev-layer.c 	unsigned char *s, *e;
e                1380 drivers/staging/isdn/gigaset/ev-layer.c 			val = simple_strtoul(s, (char **) &e, 10);
e                1381 drivers/staging/isdn/gigaset/ev-layer.c 			if (val > INT_MAX || e == s)
e                1384 drivers/staging/isdn/gigaset/ev-layer.c 				if (*e)
e                1386 drivers/staging/isdn/gigaset/ev-layer.c 			} else if (*e != '.')
e                1389 drivers/staging/isdn/gigaset/ev-layer.c 				s = e + 1;
e                 184 drivers/staging/ks7010/ks_wlan_net.c 	if ((fwrq->freq.e == 1) &&
e                 192 drivers/staging/ks7010/ks_wlan_net.c 		fwrq->freq.e = 0;
e                 196 drivers/staging/ks7010/ks_wlan_net.c 	if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0))
e                 233 drivers/staging/ks7010/ks_wlan_net.c 	fwrq->freq.e = 1;
e                 926 drivers/staging/ks7010/ks_wlan_net.c 		range->freq[k++].e = 1;	/* Values in table in MHz -> * 10^5 * 10 */
e                 933 drivers/staging/ks7010/ks_wlan_net.c 		range->freq[13].e = 1;	/* Values in table in MHz -> * 10^5 * 10 */
e                1222 drivers/staging/ks7010/ks_wlan_net.c 	iwe.u.freq.e = 1;
e                2240 drivers/staging/media/ipu3/ipu3-css.c 		unsigned int e = IPU3_CSS_RECT_ENVELOPE;
e                2253 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->rect[e].width,
e                2254 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->rect[e].height);
e                 804 drivers/staging/media/omap4iss/iss_video.c iss_video_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e)
e                 808 drivers/staging/media/omap4iss/iss_video.c 	return vb2_expbuf(&vfh->queue, e);
e                 567 drivers/staging/olpc_dcon/olpc_dcon.c 			     unsigned long e, void *p)
e                 187 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	iwe.u.freq.e = 1;
e                 693 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		wrqu->freq.e = 1;
e                 697 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		wrqu->freq.e = 1;
e                 909 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 			range->freq[val].e = 1;
e                 358 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c 			range->freq[val].e = 1;
e                  43 drivers/staging/rtl8192e/rtllib_softmac_wx.c 	if (fwrq->e == 1) {
e                  53 drivers/staging/rtl8192e/rtllib_softmac_wx.c 			fwrq->e = 0;
e                  58 drivers/staging/rtl8192e/rtllib_softmac_wx.c 	if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) {
e                  97 drivers/staging/rtl8192e/rtllib_softmac_wx.c 	fwrq->e = 1;
e                 100 drivers/staging/rtl8192e/rtllib_wx.c 	iwe.u.freq.e = 0;
e                 277 drivers/staging/rtl8192u/ieee80211/ieee80211_module.c 	struct proc_dir_entry *e;
e                 287 drivers/staging/rtl8192u/ieee80211/ieee80211_module.c 	e = proc_create("debug_level", 0644, ieee80211_proc, &fops);
e                 288 drivers/staging/rtl8192u/ieee80211/ieee80211_module.c 	if (!e) {
e                  44 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c 	if (fwrq->e == 1) {
e                  54 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c 			fwrq->e = 0;
e                  59 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c 	if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) {
e                  96 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c 	fwrq->e = 1;
e                  98 drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c 	iwe.u.freq.e = 0;
e                 302 drivers/staging/rtl8192u/r8192U_wx.c 			range->freq[val].e = 1;
e                 293 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	iwe.u.freq.e = (s16)1;
e                 667 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if ((fwrq->e == 1) &&
e                 675 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		fwrq->e = 0;
e                 679 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	if ((fwrq->m > 14) || (fwrq->e > 0)) {
e                 707 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	wrqu->freq.e = 1;
e                 903 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		range->freq[val].e = 1;
e                2316 drivers/staging/rtl8723bs/core/rtw_security.c 	size_t i, e, left, total_len;
e                2324 drivers/staging/rtl8723bs/core/rtw_security.c 	for (e = 0; e < num_elem; e++)
e                2325 drivers/staging/rtl8723bs/core/rtw_security.c 		total_len += len[e];
e                2328 drivers/staging/rtl8723bs/core/rtw_security.c 	e = 0;
e                2336 drivers/staging/rtl8723bs/core/rtw_security.c 				e++;
e                2337 drivers/staging/rtl8723bs/core/rtw_security.c 				pos = addr[e];
e                2338 drivers/staging/rtl8723bs/core/rtw_security.c 				end = pos + len[e];
e                2354 drivers/staging/rtl8723bs/core/rtw_security.c 				e++;
e                2355 drivers/staging/rtl8723bs/core/rtw_security.c 				pos = addr[e];
e                2356 drivers/staging/rtl8723bs/core/rtw_security.c 				end = pos + len[e];
e                 181 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	iwe.u.freq.e = 1;
e                 833 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		wrqu->freq.e = 1;
e                 838 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		wrqu->freq.e = 1;
e                1082 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 			range->freq[val].e = 1;
e                 141 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_INFO1_EP(e)           ((e) << 0)  /* endpoint number */
e                 343 drivers/tty/serial/sc16is7xx.c #define to_sc16is7xx_port(p,e)	((container_of((p), struct sc16is7xx_port, e)))
e                 344 drivers/tty/serial/sc16is7xx.c #define to_sc16is7xx_one(p,e)	((container_of((p), struct sc16is7xx_one, e)))
e                  55 drivers/tty/vt/selection.c static inline void highlight(const int s, const int e)
e                  57 drivers/tty/vt/selection.c 	invert_screen(sel_cons, s, e-s+2, 1);
e                2545 drivers/tty/vt/vt.c 	struct interval e = *(struct interval *) elt;
e                2547 drivers/tty/vt/vt.c 	if (ucs > e.last)
e                2549 drivers/tty/vt/vt.c 	else if (ucs < e.first)
e                 833 drivers/usb/core/devio.c 	unsigned int i, j, e;
e                 846 drivers/usb/core/devio.c 			for (e = 0; e < alts->desc.bNumEndpoints; e++) {
e                 847 drivers/usb/core/devio.c 				endpt = &alts->endpoint[e].desc;
e                 837 drivers/usb/early/ehci-dbgp.c 	char *e;
e                 846 drivers/usb/early/ehci-dbgp.c 		dbgp_num = simple_strtoul(s, &e, 10);
e                  92 drivers/usb/gadget/legacy/hid.c 	struct hidg_func_node *e, *n;
e                 100 drivers/usb/gadget/legacy/hid.c 	list_for_each_entry(e, &hidg_func_list, node) {
e                 101 drivers/usb/gadget/legacy/hid.c 		e->f = usb_get_function(e->fi);
e                 102 drivers/usb/gadget/legacy/hid.c 		if (IS_ERR(e->f))
e                 104 drivers/usb/gadget/legacy/hid.c 		status = usb_add_function(c, e->f);
e                 106 drivers/usb/gadget/legacy/hid.c 			usb_put_function(e->f);
e                 114 drivers/usb/gadget/legacy/hid.c 		if (n == e)
e                 240 drivers/usb/gadget/legacy/hid.c 	struct hidg_func_node *e, *n;
e                 242 drivers/usb/gadget/legacy/hid.c 	list_for_each_entry_safe(e, n, &hidg_func_list, node) {
e                 243 drivers/usb/gadget/legacy/hid.c 		list_del(&e->node);
e                 244 drivers/usb/gadget/legacy/hid.c 		kfree(e);
e                 262 drivers/usb/gadget/udc/bdc/bdc.h #define to_bdc_ep(e)		container_of(e, struct bdc_ep, usb_ep)
e                 272 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_RX_EP_SEL(e)	(1 << ((e) << 1))
e                 275 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_TX_EP_SEL(e)	(1 << (((e) << 1) + 1))
e                 286 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_EP_SEL(e)		(1 << (e))
e                 305 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_LOG_ENDPOINT(e)	((e) << 2)
e                 318 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_DMAEP(e)		(1 << (e))
e                1809 drivers/usb/gadget/udc/net2272.c 			struct net2272_ep *e;
e                1814 drivers/usb/gadget/udc/net2272.c 				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
e                1815 drivers/usb/gadget/udc/net2272.c 				if (!e || u.r.wLength > 2)
e                1817 drivers/usb/gadget/udc/net2272.c 				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
e                1859 drivers/usb/gadget/udc/net2272.c 			struct net2272_ep *e;
e                1866 drivers/usb/gadget/udc/net2272.c 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
e                1867 drivers/usb/gadget/udc/net2272.c 			if (!e)
e                1869 drivers/usb/gadget/udc/net2272.c 			if (e->wedged) {
e                1874 drivers/usb/gadget/udc/net2272.c 				clear_halt(e);
e                1880 drivers/usb/gadget/udc/net2272.c 			struct net2272_ep *e;
e                1893 drivers/usb/gadget/udc/net2272.c 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
e                1894 drivers/usb/gadget/udc/net2272.c 			if (!e)
e                1896 drivers/usb/gadget/udc/net2272.c 			set_halt(e);
e                2871 drivers/usb/gadget/udc/net2280.c 		struct net2280_ep *e;
e                2893 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev, w_index);
e                2894 drivers/usb/gadget/udc/net2280.c 			if (!e)
e                2896 drivers/usb/gadget/udc/net2280.c 			status = readl(&e->regs->ep_rsp) &
e                2953 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev,	w_index);
e                2954 drivers/usb/gadget/udc/net2280.c 			if (!e)
e                2958 drivers/usb/gadget/udc/net2280.c 			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
e                2963 drivers/usb/gadget/udc/net2280.c 			ep_clear_seqnum(e);
e                2964 drivers/usb/gadget/udc/net2280.c 			clear_halt(e);
e                2965 drivers/usb/gadget/udc/net2280.c 			if (!list_empty(&e->queue) && e->td_dma)
e                2966 drivers/usb/gadget/udc/net2280.c 				restart_dma(e);
e                3019 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev,	w_index);
e                3020 drivers/usb/gadget/udc/net2280.c 			if (!e || (w_value != USB_ENDPOINT_HALT))
e                3211 drivers/usb/gadget/udc/net2280.c 			struct net2280_ep	*e;
e                3217 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev, w_index);
e                3218 drivers/usb/gadget/udc/net2280.c 			if (!e || w_length > 2)
e                3221 drivers/usb/gadget/udc/net2280.c 			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
e                3236 drivers/usb/gadget/udc/net2280.c 			struct net2280_ep	*e;
e                3243 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev, w_index);
e                3244 drivers/usb/gadget/udc/net2280.c 			if (!e)
e                3246 drivers/usb/gadget/udc/net2280.c 			if (e->wedged) {
e                3250 drivers/usb/gadget/udc/net2280.c 				ep_vdbg(dev, "%s clear halt\n", e->ep.name);
e                3251 drivers/usb/gadget/udc/net2280.c 				clear_halt(e);
e                3253 drivers/usb/gadget/udc/net2280.c 					!list_empty(&e->queue) && e->td_dma)
e                3254 drivers/usb/gadget/udc/net2280.c 						restart_dma(e);
e                3261 drivers/usb/gadget/udc/net2280.c 			struct net2280_ep	*e;
e                3268 drivers/usb/gadget/udc/net2280.c 			e = get_ep_by_addr(dev, w_index);
e                3269 drivers/usb/gadget/udc/net2280.c 			if (!e)
e                3271 drivers/usb/gadget/udc/net2280.c 			if (e->ep.name == ep0name)
e                3273 drivers/usb/gadget/udc/net2280.c 			set_halt(e);
e                3274 drivers/usb/gadget/udc/net2280.c 			if ((dev->quirks & PLX_PCIE) && e->dma)
e                3275 drivers/usb/gadget/udc/net2280.c 				abort_dma(e);
e                 404 drivers/usb/host/ehci-timer.c 	unsigned	e;
e                 417 drivers/usb/host/ehci-timer.c 	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
e                 418 drivers/usb/host/ehci-timer.c 		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
e                 419 drivers/usb/host/ehci-timer.c 			event_handlers[e](ehci);
e                 421 drivers/usb/host/ehci-timer.c 			ehci_enable_event(ehci, e, false);
e                 651 drivers/usb/host/ehci.h #define	ehci_is_TDI(e)			(ehci_to_hcd(e)->has_tt)
e                 673 drivers/usb/host/ehci.h #define	ehci_is_TDI(e)			(0)
e                 684 drivers/usb/host/ehci.h #define	ehci_has_fsl_portno_bug(e)		((e)->has_fsl_port_bug)
e                 686 drivers/usb/host/ehci.h #define	ehci_has_fsl_portno_bug(e)		(0)
e                 695 drivers/usb/host/ehci.h #define ehci_has_fsl_hs_errata(e)	((e)->has_fsl_hs_errata)
e                 697 drivers/usb/host/ehci.h #define ehci_has_fsl_hs_errata(e)	(0)
e                 705 drivers/usb/host/ehci.h #define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
e                 722 drivers/usb/host/ehci.h #define ehci_big_endian_mmio(e)		((e)->big_endian_mmio)
e                 723 drivers/usb/host/ehci.h #define ehci_big_endian_capbase(e)	((e)->big_endian_capbase)
e                 725 drivers/usb/host/ehci.h #define ehci_big_endian_mmio(e)		0
e                 726 drivers/usb/host/ehci.h #define ehci_big_endian_capbase(e)	0
e                 811 drivers/usb/host/ehci.h #define ehci_big_endian_desc(e)		((e)->big_endian_desc)
e                1346 drivers/usb/host/fotg210-hcd.c 	unsigned e;
e                1359 drivers/usb/host/fotg210-hcd.c 	for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
e                1360 drivers/usb/host/fotg210-hcd.c 		if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
e                1361 drivers/usb/host/fotg210-hcd.c 			event_handlers[e](fotg210);
e                1363 drivers/usb/host/fotg210-hcd.c 			fotg210_enable_event(fotg210, e, false);
e                 636 drivers/usb/host/fotg210.h #define	fotg210_has_fsl_portno_bug(e)		(0)
e                 649 drivers/usb/host/fotg210.h #define fotg210_big_endian_mmio(e)	0
e                 650 drivers/usb/host/fotg210.h #define fotg210_big_endian_capbase(e)	0
e                 131 drivers/usb/misc/usbtest.c 				   struct usb_host_endpoint *e)
e                 135 drivers/usb/misc/usbtest.c 			*in = e;
e                 138 drivers/usb/misc/usbtest.c 			*out = e;
e                 168 drivers/usb/misc/usbtest.c 			struct usb_host_endpoint	*e;
e                 171 drivers/usb/misc/usbtest.c 			e = alt->endpoint + ep;
e                 172 drivers/usb/misc/usbtest.c 			edi = usb_endpoint_dir_in(&e->desc);
e                 174 drivers/usb/misc/usbtest.c 			switch (usb_endpoint_type(&e->desc)) {
e                 176 drivers/usb/misc/usbtest.c 				endpoint_update(edi, &in, &out, e);
e                 180 drivers/usb/misc/usbtest.c 					endpoint_update(edi, &int_in, &int_out, e);
e                 184 drivers/usb/misc/usbtest.c 					endpoint_update(edi, &iso_in, &iso_out, e);
e                 900 drivers/usb/musb/musb_gadget_ep0.c static int musb_g_ep0_disable(struct usb_ep *e)
e                 907 drivers/usb/musb/musb_gadget_ep0.c musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
e                 916 drivers/usb/musb/musb_gadget_ep0.c 	if (!e || !r)
e                 919 drivers/usb/musb/musb_gadget_ep0.c 	ep = to_musb_ep(e);
e                 994 drivers/usb/musb/musb_gadget_ep0.c static int musb_g_ep0_halt(struct usb_ep *e, int value)
e                1003 drivers/usb/musb/musb_gadget_ep0.c 	if (!e || !value)
e                1006 drivers/usb/musb/musb_gadget_ep0.c 	ep = to_musb_ep(e);
e                 206 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
e                 207 drivers/usb/renesas_usbhs/fifo.c #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
e                  93 drivers/usb/renesas_usbhs/mod_gadget.c #define usbhsg_ep_to_uep(e)		container_of(e, struct usbhsg_uep, ep)
e                 131 drivers/usb/typec/tcpm/tcpm.c #define GENERATE_ENUM(e)	e
e                 534 drivers/video/fbdev/hyperv_fb.c 			 unsigned long e, void *p)
e                 164 drivers/video/fbdev/i810/i810-i2c.c 		const u8 *e = fb_firmware_edid(info->device);
e                 166 drivers/video/fbdev/i810/i810-i2c.c 		if (e != NULL) {
e                 168 drivers/video/fbdev/i810/i810-i2c.c 			edid = kmemdup(e, EDID_LENGTH, GFP_KERNEL);
e                 162 drivers/video/fbdev/nvidia/nv_i2c.c 		const u8 *e = fb_firmware_edid(info->device);
e                 164 drivers/video/fbdev/nvidia/nv_i2c.c 		if (e != NULL)
e                 165 drivers/video/fbdev/nvidia/nv_i2c.c 			edid = kmemdup(e, EDID_LENGTH, GFP_KERNEL);
e                1450 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	bool e;
e                1454 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	e = op->enabled;
e                1458 drivers/video/fbdev/omap2/omapfb/dss/apply.c 	return e;
e                1124 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	u32 e;
e                1126 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	e = dsi->errors;
e                1129 drivers/video/fbdev/omap2/omapfb/dss/dsi.c 	return e;
e                 230 drivers/video/fbdev/savage/savagefb-i2c.c 		const u8 *e = fb_firmware_edid(info->device);
e                 232 drivers/video/fbdev/savage/savagefb-i2c.c 		if (e)
e                 233 drivers/video/fbdev/savage/savagefb-i2c.c 			edid = kmemdup(e, EDID_LENGTH, GFP_KERNEL);
e                  87 drivers/xen/events/events_base.c #define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
e                  88 drivers/xen/events/events_base.c #define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
e                 122 drivers/xen/xen-pciback/passthrough.c 	struct pci_dev_entry *dev_entry, *e;
e                 136 drivers/xen/xen-pciback/passthrough.c 			list_for_each_entry(e, &dev_data->dev_list, list) {
e                 137 drivers/xen/xen-pciback/passthrough.c 				if (dev == e->dev) {
e                 158 drivers/xen/xen-pciback/vpci.c 		struct pci_dev_entry *e;
e                 160 drivers/xen/xen-pciback/vpci.c 		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
e                 161 drivers/xen/xen-pciback/vpci.c 			if (e->dev == dev) {
e                 162 drivers/xen/xen-pciback/vpci.c 				list_del(&e->list);
e                 163 drivers/xen/xen-pciback/vpci.c 				found_dev = e->dev;
e                 164 drivers/xen/xen-pciback/vpci.c 				kfree(e);
e                 214 drivers/xen/xen-pciback/vpci.c 		struct pci_dev_entry *e, *tmp;
e                 215 drivers/xen/xen-pciback/vpci.c 		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
e                 217 drivers/xen/xen-pciback/vpci.c 			struct pci_dev *dev = e->dev;
e                 218 drivers/xen/xen-pciback/vpci.c 			list_del(&e->list);
e                 222 drivers/xen/xen-pciback/vpci.c 			kfree(e);
e                 168 fs/9p/v9fs.c   	char *s, *e;
e                 324 fs/9p/v9fs.c   				uid = simple_strtoul(s, &e, 10);
e                 325 fs/9p/v9fs.c   				if (*e != '\0') {
e                 170 fs/afs/fs_probe.c 	struct afs_error e;
e                 174 fs/afs/fs_probe.c 	e.error = 0;
e                 175 fs/afs/fs_probe.c 	e.responded = false;
e                 182 fs/afs/fs_probe.c 		    afs_do_probe_fileserver(net, server, key, i, &e))
e                 186 fs/afs/fs_probe.c 	return in_progress ? 0 : e.error;
e                 113 fs/afs/misc.c  void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
e                 119 fs/afs/misc.c  		if (e->error == -ETIMEDOUT ||
e                 120 fs/afs/misc.c  		    e->error == -ETIME)
e                 125 fs/afs/misc.c  		if (e->error == -ENOMEM ||
e                 126 fs/afs/misc.c  		    e->error == -ENONET)
e                 131 fs/afs/misc.c  		if (e->error == -ERFKILL)
e                 135 fs/afs/misc.c  		if (e->error == -EADDRNOTAVAIL)
e                 139 fs/afs/misc.c  		if (e->error == -ENETUNREACH)
e                 143 fs/afs/misc.c  		if (e->error == -EHOSTUNREACH)
e                 147 fs/afs/misc.c  		if (e->error == -EHOSTDOWN)
e                 151 fs/afs/misc.c  		if (e->error == -ECONNREFUSED)
e                 155 fs/afs/misc.c  		if (e->error == -ECONNRESET)
e                 159 fs/afs/misc.c  		if (e->responded)
e                 161 fs/afs/misc.c  		e->error = error;
e                 165 fs/afs/misc.c  		e->responded = true;
e                 166 fs/afs/misc.c  		e->error = afs_abort_to_error(abort_code);
e                 145 fs/afs/rotate.c 	struct afs_error e;
e                 461 fs/afs/rotate.c 	e.error = -EDESTADDRREQ;
e                 462 fs/afs/rotate.c 	e.responded = false;
e                 466 fs/afs/rotate.c 		afs_prioritise_error(&e, READ_ONCE(s->probe.error),
e                 470 fs/afs/rotate.c 	error = e.error;
e                 169 fs/afs/vl_probe.c 	struct afs_error e;
e                 173 fs/afs/vl_probe.c 	e.error = 0;
e                 174 fs/afs/vl_probe.c 	e.responded = false;
e                 181 fs/afs/vl_probe.c 		    afs_do_probe_vlserver(net, server, key, i, &e))
e                 185 fs/afs/vl_probe.c 	return in_progress ? 0 : e.error;
e                  88 fs/afs/vl_rotate.c 	struct afs_error e;
e                 256 fs/afs/vl_rotate.c 	e.error = -EDESTADDRREQ;
e                 257 fs/afs/vl_rotate.c 	e.responded = false;
e                 261 fs/afs/vl_rotate.c 		afs_prioritise_error(&e, READ_ONCE(s->probe.error),
e                  97 fs/binfmt_misc.c 		Node *e = list_entry(l, Node, list);
e                 102 fs/binfmt_misc.c 		if (!test_bit(Enabled, &e->flags))
e                 106 fs/binfmt_misc.c 		if (!test_bit(Magic, &e->flags)) {
e                 107 fs/binfmt_misc.c 			if (p && !strcmp(e->magic, p + 1))
e                 108 fs/binfmt_misc.c 				return e;
e                 113 fs/binfmt_misc.c 		s = bprm->buf + e->offset;
e                 114 fs/binfmt_misc.c 		if (e->mask) {
e                 115 fs/binfmt_misc.c 			for (j = 0; j < e->size; j++)
e                 116 fs/binfmt_misc.c 				if ((*s++ ^ e->magic[j]) & e->mask[j])
e                 119 fs/binfmt_misc.c 			for (j = 0; j < e->size; j++)
e                 120 fs/binfmt_misc.c 				if ((*s++ ^ e->magic[j]))
e                 123 fs/binfmt_misc.c 		if (j == e->size)
e                 124 fs/binfmt_misc.c 			return e;
e                 277 fs/binfmt_misc.c static char *check_special_flags(char *sfs, Node *e)
e                 288 fs/binfmt_misc.c 			e->flags |= MISC_FMT_PRESERVE_ARGV0;
e                 293 fs/binfmt_misc.c 			e->flags |= MISC_FMT_OPEN_BINARY;
e                 300 fs/binfmt_misc.c 			e->flags |= (MISC_FMT_CREDENTIALS |
e                 306 fs/binfmt_misc.c 			e->flags |= MISC_FMT_OPEN_FILE;
e                 323 fs/binfmt_misc.c 	Node *e;
e                 337 fs/binfmt_misc.c 	e = kmalloc(memsize, GFP_KERNEL);
e                 338 fs/binfmt_misc.c 	if (!e)
e                 341 fs/binfmt_misc.c 	p = buf = (char *)e + sizeof(Node);
e                 343 fs/binfmt_misc.c 	memset(e, 0, sizeof(Node));
e                 355 fs/binfmt_misc.c 	e->name = p;
e                 360 fs/binfmt_misc.c 	if (!e->name[0] ||
e                 361 fs/binfmt_misc.c 	    !strcmp(e->name, ".") ||
e                 362 fs/binfmt_misc.c 	    !strcmp(e->name, "..") ||
e                 363 fs/binfmt_misc.c 	    strchr(e->name, '/'))
e                 366 fs/binfmt_misc.c 	pr_debug("register: name: {%s}\n", e->name);
e                 372 fs/binfmt_misc.c 		e->flags = 1 << Enabled;
e                 376 fs/binfmt_misc.c 		e->flags = (1 << Enabled) | (1 << Magic);
e                 384 fs/binfmt_misc.c 	if (test_bit(Magic, &e->flags)) {
e                 394 fs/binfmt_misc.c 			int r = kstrtoint(p, 10, &e->offset);
e                 395 fs/binfmt_misc.c 			if (r != 0 || e->offset < 0)
e                 401 fs/binfmt_misc.c 		pr_debug("register: offset: %#x\n", e->offset);
e                 404 fs/binfmt_misc.c 		e->magic = p;
e                 408 fs/binfmt_misc.c 		if (!e->magic[0])
e                 413 fs/binfmt_misc.c 				DUMP_PREFIX_NONE, e->magic, p - e->magic);
e                 416 fs/binfmt_misc.c 		e->mask = p;
e                 420 fs/binfmt_misc.c 		if (!e->mask[0]) {
e                 421 fs/binfmt_misc.c 			e->mask = NULL;
e                 426 fs/binfmt_misc.c 				DUMP_PREFIX_NONE, e->mask, p - e->mask);
e                 434 fs/binfmt_misc.c 		e->size = string_unescape_inplace(e->magic, UNESCAPE_HEX);
e                 435 fs/binfmt_misc.c 		if (e->mask &&
e                 436 fs/binfmt_misc.c 		    string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
e                 438 fs/binfmt_misc.c 		if (e->size > BINPRM_BUF_SIZE ||
e                 439 fs/binfmt_misc.c 		    BINPRM_BUF_SIZE - e->size < e->offset)
e                 441 fs/binfmt_misc.c 		pr_debug("register: magic/mask length: %i\n", e->size);
e                 445 fs/binfmt_misc.c 				DUMP_PREFIX_NONE, e->magic, e->size);
e                 447 fs/binfmt_misc.c 			if (e->mask) {
e                 449 fs/binfmt_misc.c 				char *masked = kmalloc(e->size, GFP_KERNEL);
e                 453 fs/binfmt_misc.c 					DUMP_PREFIX_NONE, e->mask, e->size);
e                 456 fs/binfmt_misc.c 					for (i = 0; i < e->size; ++i)
e                 457 fs/binfmt_misc.c 						masked[i] = e->magic[i] & e->mask[i];
e                 460 fs/binfmt_misc.c 						DUMP_PREFIX_NONE, masked, e->size);
e                 476 fs/binfmt_misc.c 		e->magic = p;
e                 481 fs/binfmt_misc.c 		if (!e->magic[0] || strchr(e->magic, '/'))
e                 483 fs/binfmt_misc.c 		pr_debug("register: extension: {%s}\n", e->magic);
e                 493 fs/binfmt_misc.c 	e->interpreter = p;
e                 498 fs/binfmt_misc.c 	if (!e->interpreter[0])
e                 500 fs/binfmt_misc.c 	pr_debug("register: interpreter: {%s}\n", e->interpreter);
e                 503 fs/binfmt_misc.c 	p = check_special_flags(p, e);
e                 509 fs/binfmt_misc.c 	return e;
e                 515 fs/binfmt_misc.c 	kfree(e);
e                 518 fs/binfmt_misc.c 	kfree(e);
e                 549 fs/binfmt_misc.c static void entry_status(Node *e, char *page)
e                 554 fs/binfmt_misc.c 	if (test_bit(Enabled, &e->flags))
e                 562 fs/binfmt_misc.c 	dp += sprintf(dp, "%s\ninterpreter %s\n", status, e->interpreter);
e                 566 fs/binfmt_misc.c 	if (e->flags & MISC_FMT_PRESERVE_ARGV0)
e                 568 fs/binfmt_misc.c 	if (e->flags & MISC_FMT_OPEN_BINARY)
e                 570 fs/binfmt_misc.c 	if (e->flags & MISC_FMT_CREDENTIALS)
e                 572 fs/binfmt_misc.c 	if (e->flags & MISC_FMT_OPEN_FILE)
e                 576 fs/binfmt_misc.c 	if (!test_bit(Magic, &e->flags)) {
e                 577 fs/binfmt_misc.c 		sprintf(dp, "extension .%s\n", e->magic);
e                 579 fs/binfmt_misc.c 		dp += sprintf(dp, "offset %i\nmagic ", e->offset);
e                 580 fs/binfmt_misc.c 		dp = bin2hex(dp, e->magic, e->size);
e                 581 fs/binfmt_misc.c 		if (e->mask) {
e                 583 fs/binfmt_misc.c 			dp = bin2hex(dp, e->mask, e->size);
e                 605 fs/binfmt_misc.c 	Node *e = inode->i_private;
e                 607 fs/binfmt_misc.c 	if (e && e->flags & MISC_FMT_OPEN_FILE)
e                 608 fs/binfmt_misc.c 		filp_close(e->interp_file, NULL);
e                 611 fs/binfmt_misc.c 	kfree(e);
e                 614 fs/binfmt_misc.c static void kill_node(Node *e)
e                 619 fs/binfmt_misc.c 	list_del_init(&e->list);
e                 622 fs/binfmt_misc.c 	dentry = e->dentry;
e                 634 fs/binfmt_misc.c 	Node *e = file_inode(file)->i_private;
e                 642 fs/binfmt_misc.c 	entry_status(e, page);
e                 654 fs/binfmt_misc.c 	Node *e = file_inode(file)->i_private;
e                 660 fs/binfmt_misc.c 		clear_bit(Enabled, &e->flags);
e                 664 fs/binfmt_misc.c 		set_bit(Enabled, &e->flags);
e                 671 fs/binfmt_misc.c 		if (!list_empty(&e->list))
e                 672 fs/binfmt_misc.c 			kill_node(e);
e                 694 fs/binfmt_misc.c 	Node *e;
e                 700 fs/binfmt_misc.c 	e = create_entry(buffer, count);
e                 702 fs/binfmt_misc.c 	if (IS_ERR(e))
e                 703 fs/binfmt_misc.c 		return PTR_ERR(e);
e                 706 fs/binfmt_misc.c 	dentry = lookup_one_len(e->name, root, strlen(e->name));
e                 728 fs/binfmt_misc.c 	if (e->flags & MISC_FMT_OPEN_FILE) {
e                 731 fs/binfmt_misc.c 		f = open_exec(e->interpreter);
e                 734 fs/binfmt_misc.c 			pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
e                 740 fs/binfmt_misc.c 		e->interp_file = f;
e                 743 fs/binfmt_misc.c 	e->dentry = dget(dentry);
e                 744 fs/binfmt_misc.c 	inode->i_private = e;
e                 749 fs/binfmt_misc.c 	list_add(&e->list, &entries);
e                 759 fs/binfmt_misc.c 		kfree(e);
e                  34 fs/btrfs/backref.c 	struct extent_inode_elem *e;
e                  52 fs/btrfs/backref.c 	e = kmalloc(sizeof(*e), GFP_NOFS);
e                  53 fs/btrfs/backref.c 	if (!e)
e                  56 fs/btrfs/backref.c 	e->next = *eie;
e                  57 fs/btrfs/backref.c 	e->inum = key->objectid;
e                  58 fs/btrfs/backref.c 	e->offset = key->offset + offset;
e                  59 fs/btrfs/backref.c 	*eie = e;
e                2195 fs/btrfs/ctree.h btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e)
e                2197 fs/btrfs/ctree.h 	return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
e                2231 fs/btrfs/ctree.h 						struct btrfs_item *e)
e                2233 fs/btrfs/ctree.h 	return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
e                 109 fs/btrfs/extent_io.c #define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
e                 589 fs/btrfs/free-space-cache.c 	struct btrfs_free_space_entry *e;
e                 598 fs/btrfs/free-space-cache.c 	e = io_ctl->cur;
e                 599 fs/btrfs/free-space-cache.c 	entry->offset = le64_to_cpu(e->offset);
e                 600 fs/btrfs/free-space-cache.c 	entry->bytes = le64_to_cpu(e->bytes);
e                 601 fs/btrfs/free-space-cache.c 	*type = e->type;
e                 639 fs/btrfs/free-space-cache.c 	struct btrfs_free_space *e, *prev = NULL;
e                 645 fs/btrfs/free-space-cache.c 		e = rb_entry(n, struct btrfs_free_space, offset_index);
e                 648 fs/btrfs/free-space-cache.c 		if (e->bitmap || prev->bitmap)
e                 650 fs/btrfs/free-space-cache.c 		if (prev->offset + prev->bytes == e->offset) {
e                 652 fs/btrfs/free-space-cache.c 			unlink_free_space(ctl, e);
e                 653 fs/btrfs/free-space-cache.c 			prev->bytes += e->bytes;
e                 654 fs/btrfs/free-space-cache.c 			kmem_cache_free(btrfs_free_space_cachep, e);
e                 661 fs/btrfs/free-space-cache.c 		prev = e;
e                 675 fs/btrfs/free-space-cache.c 	struct btrfs_free_space *e, *n;
e                 745 fs/btrfs/free-space-cache.c 		e = kmem_cache_zalloc(btrfs_free_space_cachep,
e                 747 fs/btrfs/free-space-cache.c 		if (!e)
e                 750 fs/btrfs/free-space-cache.c 		ret = io_ctl_read_entry(&io_ctl, e, &type);
e                 752 fs/btrfs/free-space-cache.c 			kmem_cache_free(btrfs_free_space_cachep, e);
e                 756 fs/btrfs/free-space-cache.c 		if (!e->bytes) {
e                 757 fs/btrfs/free-space-cache.c 			kmem_cache_free(btrfs_free_space_cachep, e);
e                 763 fs/btrfs/free-space-cache.c 			ret = link_free_space(ctl, e);
e                 768 fs/btrfs/free-space-cache.c 				kmem_cache_free(btrfs_free_space_cachep, e);
e                 774 fs/btrfs/free-space-cache.c 			e->bitmap = kmem_cache_zalloc(
e                 776 fs/btrfs/free-space-cache.c 			if (!e->bitmap) {
e                 778 fs/btrfs/free-space-cache.c 					btrfs_free_space_cachep, e);
e                 782 fs/btrfs/free-space-cache.c 			ret = link_free_space(ctl, e);
e                 789 fs/btrfs/free-space-cache.c 				kmem_cache_free(btrfs_free_space_cachep, e);
e                 792 fs/btrfs/free-space-cache.c 			list_add_tail(&e->list, &bitmaps);
e                 804 fs/btrfs/free-space-cache.c 	list_for_each_entry_safe(e, n, &bitmaps, list) {
e                 805 fs/btrfs/free-space-cache.c 		list_del_init(&e->list);
e                 806 fs/btrfs/free-space-cache.c 		ret = io_ctl_read_bitmap(&io_ctl, e);
e                 948 fs/btrfs/free-space-cache.c 		struct btrfs_free_space *e;
e                 950 fs/btrfs/free-space-cache.c 		e = rb_entry(node, struct btrfs_free_space, offset_index);
e                 953 fs/btrfs/free-space-cache.c 		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
e                 954 fs/btrfs/free-space-cache.c 				       e->bitmap);
e                 958 fs/btrfs/free-space-cache.c 		if (e->bitmap) {
e                 959 fs/btrfs/free-space-cache.c 			list_add_tail(&e->list, bitmap_list);
e                 677 fs/ceph/snap.c 			   void *p, void *e, bool deletion,
e                 691 fs/ceph/snap.c 	ceph_decode_need(&p, e, sizeof(*ri), bad);
e                 694 fs/ceph/snap.c 	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
e                 750 fs/ceph/snap.c 	     realm, invalidate, p, e);
e                 753 fs/ceph/snap.c 	if (invalidate && p >= e)
e                 761 fs/ceph/snap.c 	if (p < e)
e                 853 fs/ceph/snap.c 	void *e = p + msg->front.iov_len;
e                 895 fs/ceph/snap.c 		ceph_decode_need(&p, e, sizeof(*ri), bad);
e                 987 fs/ceph/snap.c 	ceph_update_snap_trace(mdsc, p, e,
e                 867 fs/ceph/super.h 				  void *p, void *e, bool deletion,
e                 275 fs/cifs/dfs_cache.c #define dump_tgts(e)
e                 276 fs/cifs/dfs_cache.c #define dump_ce(e)
e                 972 fs/dlm/lowcomms.c static void free_entry(struct writequeue_entry *e)
e                 974 fs/dlm/lowcomms.c 	__free_page(e->page);
e                 975 fs/dlm/lowcomms.c 	kfree(e);
e                 985 fs/dlm/lowcomms.c static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
e                 987 fs/dlm/lowcomms.c 	e->offset += completed;
e                 988 fs/dlm/lowcomms.c 	e->len -= completed;
e                 990 fs/dlm/lowcomms.c 	if (e->len == 0 && e->users == 0) {
e                 991 fs/dlm/lowcomms.c 		list_del(&e->list);
e                 992 fs/dlm/lowcomms.c 		free_entry(e);
e                1426 fs/dlm/lowcomms.c 	struct writequeue_entry *e;
e                1434 fs/dlm/lowcomms.c 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
e                1435 fs/dlm/lowcomms.c 	if ((&e->list == &con->writequeue) ||
e                1436 fs/dlm/lowcomms.c 	    (PAGE_SIZE - e->end < len)) {
e                1437 fs/dlm/lowcomms.c 		e = NULL;
e                1439 fs/dlm/lowcomms.c 		offset = e->end;
e                1440 fs/dlm/lowcomms.c 		e->end += len;
e                1441 fs/dlm/lowcomms.c 		e->users++;
e                1445 fs/dlm/lowcomms.c 	if (e) {
e                1447 fs/dlm/lowcomms.c 		*ppc = page_address(e->page) + offset;
e                1448 fs/dlm/lowcomms.c 		return e;
e                1451 fs/dlm/lowcomms.c 	e = new_writequeue_entry(con, allocation);
e                1452 fs/dlm/lowcomms.c 	if (e) {
e                1454 fs/dlm/lowcomms.c 		offset = e->end;
e                1455 fs/dlm/lowcomms.c 		e->end += len;
e                1456 fs/dlm/lowcomms.c 		e->users++;
e                1457 fs/dlm/lowcomms.c 		list_add_tail(&e->list, &con->writequeue);
e                1466 fs/dlm/lowcomms.c 	struct writequeue_entry *e = (struct writequeue_entry *)mh;
e                1467 fs/dlm/lowcomms.c 	struct connection *con = e->con;
e                1471 fs/dlm/lowcomms.c 	users = --e->users;
e                1474 fs/dlm/lowcomms.c 	e->len = e->end - e->offset;
e                1490 fs/dlm/lowcomms.c 	struct writequeue_entry *e;
e                1500 fs/dlm/lowcomms.c 		e = list_entry(con->writequeue.next, struct writequeue_entry,
e                1502 fs/dlm/lowcomms.c 		if ((struct list_head *) e == &con->writequeue)
e                1505 fs/dlm/lowcomms.c 		len = e->len;
e                1506 fs/dlm/lowcomms.c 		offset = e->offset;
e                1507 fs/dlm/lowcomms.c 		BUG_ON(len == 0 && e->users == 0);
e                1512 fs/dlm/lowcomms.c 			ret = kernel_sendpage(con->sock, e->page, offset, len,
e                1537 fs/dlm/lowcomms.c 		writequeue_entry_complete(e, ret);
e                1560 fs/dlm/lowcomms.c 	struct writequeue_entry *e, *safe;
e                1563 fs/dlm/lowcomms.c 	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
e                1564 fs/dlm/lowcomms.c 		list_del(&e->list);
e                1565 fs/dlm/lowcomms.c 		free_entry(e);
e                  34 fs/dlm/requestqueue.c 	struct rq_entry *e;
e                  37 fs/dlm/requestqueue.c 	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
e                  38 fs/dlm/requestqueue.c 	if (!e) {
e                  43 fs/dlm/requestqueue.c 	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
e                  44 fs/dlm/requestqueue.c 	e->nodeid = nodeid;
e                  45 fs/dlm/requestqueue.c 	memcpy(&e->request, ms, ms->m_header.h_length);
e                  48 fs/dlm/requestqueue.c 	list_add_tail(&e->list, &ls->ls_requestqueue);
e                  65 fs/dlm/requestqueue.c 	struct rq_entry *e;
e                  77 fs/dlm/requestqueue.c 		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
e                  80 fs/dlm/requestqueue.c 		ms = &e->request;
e                  86 fs/dlm/requestqueue.c 			  e->recover_seq);
e                  88 fs/dlm/requestqueue.c 		dlm_receive_message_saved(ls, &e->request, e->recover_seq);
e                  91 fs/dlm/requestqueue.c 		list_del(&e->list);
e                  92 fs/dlm/requestqueue.c 		kfree(e);
e                 156 fs/dlm/requestqueue.c 	struct rq_entry *e, *safe;
e                 159 fs/dlm/requestqueue.c 	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
e                 160 fs/dlm/requestqueue.c 		ms =  &e->request;
e                 162 fs/dlm/requestqueue.c 		if (purge_request(ls, ms, e->nodeid)) {
e                 163 fs/dlm/requestqueue.c 			list_del(&e->list);
e                 164 fs/dlm/requestqueue.c 			kfree(e);
e                 185 fs/erofs/erofs_fs.h static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
e                 188 fs/erofs/erofs_fs.h 				 e->e_name_len + le16_to_cpu(e->e_value_size));
e                  95 fs/ext2/acl.c  	char *e;
e                 104 fs/ext2/acl.c  	e = (char *)ext_acl + sizeof(ext2_acl_header);
e                 107 fs/ext2/acl.c  		ext2_acl_entry *entry = (ext2_acl_entry *)e;
e                 114 fs/ext2/acl.c  				e += sizeof(ext2_acl_entry);
e                 119 fs/ext2/acl.c  				e += sizeof(ext2_acl_entry);
e                 126 fs/ext2/acl.c  				e += sizeof(ext2_acl_entry_short);
e                  94 fs/ext4/acl.c  	char *e;
e                 103 fs/ext4/acl.c  	e = (char *)ext_acl + sizeof(ext4_acl_header);
e                 106 fs/ext4/acl.c  		ext4_acl_entry *entry = (ext4_acl_entry *)e;
e                 113 fs/ext4/acl.c  			e += sizeof(ext4_acl_entry);
e                 118 fs/ext4/acl.c  			e += sizeof(ext4_acl_entry);
e                 125 fs/ext4/acl.c  			e += sizeof(ext4_acl_entry_short);
e                 186 fs/ext4/xattr.c 	struct ext4_xattr_entry *e = entry;
e                 189 fs/ext4/xattr.c 	while (!IS_LAST_ENTRY(e)) {
e                 190 fs/ext4/xattr.c 		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
e                 193 fs/ext4/xattr.c 		if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
e                 195 fs/ext4/xattr.c 		e = next;
e                 218 fs/ext4/xattr.c 			if (value < (void *)e + sizeof(u32) ||
e                 462 fs/f2fs/checkpoint.c 	struct ino_entry *e, *tmp;
e                 469 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
e                 470 fs/f2fs/checkpoint.c 	if (!e) {
e                 471 fs/f2fs/checkpoint.c 		e = tmp;
e                 472 fs/f2fs/checkpoint.c 		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
e                 475 fs/f2fs/checkpoint.c 		memset(e, 0, sizeof(struct ino_entry));
e                 476 fs/f2fs/checkpoint.c 		e->ino = ino;
e                 478 fs/f2fs/checkpoint.c 		list_add_tail(&e->list, &im->ino_list);
e                 484 fs/f2fs/checkpoint.c 		f2fs_set_bit(devidx, (char *)&e->dirty_device);
e                 489 fs/f2fs/checkpoint.c 	if (e != tmp)
e                 496 fs/f2fs/checkpoint.c 	struct ino_entry *e;
e                 499 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
e                 500 fs/f2fs/checkpoint.c 	if (e) {
e                 501 fs/f2fs/checkpoint.c 		list_del(&e->list);
e                 505 fs/f2fs/checkpoint.c 		kmem_cache_free(ino_entry_slab, e);
e                 527 fs/f2fs/checkpoint.c 	struct ino_entry *e;
e                 530 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
e                 532 fs/f2fs/checkpoint.c 	return e ? true : false;
e                 537 fs/f2fs/checkpoint.c 	struct ino_entry *e, *tmp;
e                 544 fs/f2fs/checkpoint.c 		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
e                 545 fs/f2fs/checkpoint.c 			list_del(&e->list);
e                 546 fs/f2fs/checkpoint.c 			radix_tree_delete(&im->ino_root, e->ino);
e                 547 fs/f2fs/checkpoint.c 			kmem_cache_free(ino_entry_slab, e);
e                 564 fs/f2fs/checkpoint.c 	struct ino_entry *e;
e                 568 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
e                 569 fs/f2fs/checkpoint.c 	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
e                 159 fs/f2fs/node.c static void __free_nat_entry(struct nat_entry *e)
e                 161 fs/f2fs/node.c 	kmem_cache_free(nat_entry_slab, e);
e                 207 fs/f2fs/node.c static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
e                 209 fs/f2fs/node.c 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
e                 211 fs/f2fs/node.c 	__free_nat_entry(e);
e                 355 fs/f2fs/node.c 	struct nat_entry *e;
e                 359 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
e                 360 fs/f2fs/node.c 	if (e) {
e                 361 fs/f2fs/node.c 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
e                 362 fs/f2fs/node.c 				!get_nat_flag(e, HAS_FSYNCED_INODE))
e                 372 fs/f2fs/node.c 	struct nat_entry *e;
e                 376 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
e                 377 fs/f2fs/node.c 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
e                 386 fs/f2fs/node.c 	struct nat_entry *e;
e                 390 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, ino);
e                 391 fs/f2fs/node.c 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
e                 392 fs/f2fs/node.c 			(get_nat_flag(e, IS_CHECKPOINTED) ||
e                 393 fs/f2fs/node.c 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
e                 404 fs/f2fs/node.c 	struct nat_entry *new, *e;
e                 411 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
e                 412 fs/f2fs/node.c 	if (!e)
e                 413 fs/f2fs/node.c 		e = __init_nat_entry(nm_i, new, ne, false);
e                 415 fs/f2fs/node.c 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
e                 416 fs/f2fs/node.c 				nat_get_blkaddr(e) !=
e                 418 fs/f2fs/node.c 				nat_get_version(e) != ne->version);
e                 420 fs/f2fs/node.c 	if (e != new)
e                 428 fs/f2fs/node.c 	struct nat_entry *e;
e                 432 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, ni->nid);
e                 433 fs/f2fs/node.c 	if (!e) {
e                 434 fs/f2fs/node.c 		e = __init_nat_entry(nm_i, new, NULL, true);
e                 435 fs/f2fs/node.c 		copy_node_info(&e->ni, ni);
e                 443 fs/f2fs/node.c 		copy_node_info(&e->ni, ni);
e                 447 fs/f2fs/node.c 	if (e != new)
e                 451 fs/f2fs/node.c 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
e                 452 fs/f2fs/node.c 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
e                 454 fs/f2fs/node.c 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
e                 456 fs/f2fs/node.c 	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
e                 460 fs/f2fs/node.c 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
e                 461 fs/f2fs/node.c 		unsigned char version = nat_get_version(e);
e                 462 fs/f2fs/node.c 		nat_set_version(e, inc_node_version(version));
e                 466 fs/f2fs/node.c 	nat_set_blkaddr(e, new_blkaddr);
e                 468 fs/f2fs/node.c 		set_nat_flag(e, IS_CHECKPOINTED, false);
e                 469 fs/f2fs/node.c 	__set_nat_cache_dirty(nm_i, e);
e                 473 fs/f2fs/node.c 		e = __lookup_nat_cache(nm_i, ni->ino);
e                 474 fs/f2fs/node.c 	if (e) {
e                 476 fs/f2fs/node.c 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
e                 477 fs/f2fs/node.c 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
e                 526 fs/f2fs/node.c 	struct nat_entry *e;
e                 535 fs/f2fs/node.c 	e = __lookup_nat_cache(nm_i, nid);
e                 536 fs/f2fs/node.c 	if (e) {
e                 537 fs/f2fs/node.c 		ni->ino = nat_get_ino(e);
e                 538 fs/f2fs/node.c 		ni->blk_addr = nat_get_blkaddr(e);
e                 539 fs/f2fs/node.c 		ni->version = nat_get_version(e);
e                2133 fs/f2fs/node.c 	struct free_nid *i, *e;
e                2180 fs/f2fs/node.c 		e = __lookup_free_nid_list(nm_i, nid);
e                2181 fs/f2fs/node.c 		if (e) {
e                2182 fs/f2fs/node.c 			if (e->state == FREE_NID)
e                  85 fs/fs_parser.c 	const struct fs_parameter_enum *e;
e                 190 fs/fs_parser.c 		for (e = desc->enums; e->name[0]; e++) {
e                 191 fs/fs_parser.c 			if (e->opt == p->opt &&
e                 192 fs/fs_parser.c 			    strcmp(e->name, param->string) == 0) {
e                 193 fs/fs_parser.c 				result->uint_32 = e->value;
e                 363 fs/fs_parser.c 	const struct fs_parameter_enum *e;
e                 417 fs/fs_parser.c 		for (e = desc->enums; e->name[0]; e++) {
e                 422 fs/fs_parser.c 				if (param->opt == e->opt &&
e                 425 fs/fs_parser.c 					       name, e - desc->enums, param->name);
e                 437 fs/fs_parser.c 			for (e = desc->enums; e->name[0]; e++)
e                 438 fs/fs_parser.c 				if (e->opt == param->opt)
e                 440 fs/fs_parser.c 			if (!e->name[0]) {
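The fs_parser.c entries scan a sentinel-terminated enum table: iteration stops at the entry whose name is empty (the e->name[0] test) rather than at an explicit count. A self-contained sketch of the same lookup shape; the table and names are invented, and a NULL-pointer sentinel stands in for the kernel's empty embedded-array name:

#include <stdio.h>
#include <string.h>

/* Sentinel-terminated name/value table, scanned the way fs_parser.c
 * walks desc->enums.  A NULL name ends the table here; the kernel
 * tests e->name[0] on an embedded char array instead. */
struct enum_ent {
        const char *name;
        int value;
};

static const struct enum_ent enums[] = {
        { "relaxed", 0 },
        { "strict",  1 },
        { NULL,      0 },       /* end of table */
};

static int lookup_enum(const char *s, int *out)
{
        const struct enum_ent *e;

        for (e = enums; e->name; e++) {
                if (strcmp(e->name, s) == 0) {
                        *out = e->value;
                        return 0;
                }
        }
        return -1;              /* no such name */
}

int main(void)
{
        int v;

        if (lookup_enum("strict", &v) == 0)
                printf("strict -> %d\n", v);    /* strict -> 1 */
        return 0;
}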
e                  48 fs/hfs/bfind.c 	int b, e;
e                  52 fs/hfs/bfind.c 	e = bnode->num_recs - 1;
e                  55 fs/hfs/bfind.c 		rec = (e + b) / 2;
e                  65 fs/hfs/bfind.c 			e = rec;
e                  72 fs/hfs/bfind.c 			e = rec - 1;
e                  73 fs/hfs/bfind.c 	} while (b <= e);
e                  74 fs/hfs/bfind.c 	if (rec != e && e >= 0) {
e                  75 fs/hfs/bfind.c 		len = hfs_brec_lenoff(bnode, e, &off);
e                  76 fs/hfs/bfind.c 		keylen = hfs_brec_keylen(bnode, e);
e                  84 fs/hfs/bfind.c 	fd->record = e;
e                 119 fs/hfsplus/bfind.c 	int b, e;
e                 124 fs/hfsplus/bfind.c 	e = bnode->num_recs - 1;
e                 127 fs/hfsplus/bfind.c 		rec = (e + b) / 2;
e                 135 fs/hfsplus/bfind.c 		if (rec_found(bnode, fd, &b, &e, &rec)) {
e                 139 fs/hfsplus/bfind.c 	} while (b <= e);
e                 141 fs/hfsplus/bfind.c 	if (rec != e && e >= 0) {
e                 142 fs/hfsplus/bfind.c 		len = hfs_brec_lenoff(bnode, e, &off);
e                 143 fs/hfsplus/bfind.c 		keylen = hfs_brec_keylen(bnode, e);
e                 152 fs/hfsplus/bfind.c 	fd->record = e;
e                 104 fs/hfsplus/unicode.c 	int i, s, e;
e                 107 fs/hfsplus/unicode.c 	e = p[1];
e                 108 fs/hfsplus/unicode.c 	if (!e || cc < p[s * 2] || cc > p[e * 2])
e                 111 fs/hfsplus/unicode.c 		i = (s + e) / 2;
e                 115 fs/hfsplus/unicode.c 			e = i - 1;
e                 118 fs/hfsplus/unicode.c 	} while (s <= e);
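fs/hfs/bfind.c, fs/hfsplus/bfind.c and fs/hfsplus/unicode.c all use the same inclusive-bounds binary search: b and e bracket the live range, the loop ends when b > e, and on a miss e is left at the closest predecessor (hence the "rec != e && e >= 0" fixup in the bfind excerpts). A freestanding sketch, with plain integer keys standing in for the B-tree record compare:

#include <stdio.h>

/* Inclusive-bounds binary search: on a miss, e is left at the index
 * of the largest record below the key (or -1 if there is none). */
static int brec_find(const int *rec, int nrecs, int key)
{
        int b = 0, e = nrecs - 1, mid;

        while (b <= e) {
                mid = (b + e) / 2;
                if (rec[mid] == key)
                        return mid;             /* exact hit */
                if (rec[mid] < key)
                        b = mid + 1;
                else
                        e = mid - 1;
        }
        return e;       /* closest predecessor */
}

int main(void)
{
        int recs[] = { 10, 20, 30, 40 };

        printf("%d\n", brec_find(recs, 4, 25)); /* prints 1 (record 20) */
        return 0;
}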
e                  92 fs/hpfs/dir.c  		int e = 0;
e                  98 fs/hpfs/dir.c  			e = 1;
e                 103 fs/hpfs/dir.c  			e = 1;
e                 107 fs/hpfs/dir.c  		if (e) {
e                 132 fs/jffs2/acl.c 	void *e;
e                 141 fs/jffs2/acl.c 	e = header + 1;
e                 144 fs/jffs2/acl.c 		entry = e;
e                 151 fs/jffs2/acl.c 				e += sizeof(struct jffs2_acl_entry);
e                 156 fs/jffs2/acl.c 				e += sizeof(struct jffs2_acl_entry);
e                 163 fs/jffs2/acl.c 				e += sizeof(struct jffs2_acl_entry_short);
e                  87 fs/jffs2/os-linux.h #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
e                 209 fs/jffs2/summary.h #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
e                  99 fs/nfsd/nfs4idmap.c 	struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL);
e                 100 fs/nfsd/nfs4idmap.c 	if (e)
e                 101 fs/nfsd/nfs4idmap.c 		return &e->h;
e                 202 fs/ocfs2/cluster/tcp.c # define o2net_init_nst(a, b, c, d, e)
e                 147 fs/ocfs2/dlmglue.h #define ocfs2_inode_lock_full(i, r, e, f)\
e                 148 fs/ocfs2/dlmglue.h 		ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
e                 149 fs/ocfs2/dlmglue.h #define ocfs2_inode_lock_nested(i, b, e, s)\
e                 150 fs/ocfs2/dlmglue.h 		ocfs2_inode_lock_full_nested(i, b, e, 0, s)
e                 153 fs/ocfs2/dlmglue.h #define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL)
e                 154 fs/ocfs2/dlmglue.h #define ocfs2_try_inode_lock(i, b, e)\
e                 155 fs/ocfs2/dlmglue.h 		ocfs2_inode_lock_full_nested(i, b, e, OCFS2_META_LOCK_NOQUEUE,\
e                3121 fs/reiserfs/reiserfs.h #define PROC_EXP( e )   e
e                3154 fs/reiserfs/reiserfs.h #define PROC_EXP( e )
e                 138 fs/reiserfs/xattr_acl.c 	char *e;
e                 149 fs/reiserfs/xattr_acl.c 	e = (char *)ext_acl + sizeof(reiserfs_acl_header);
e                 152 fs/reiserfs/xattr_acl.c 		reiserfs_acl_entry *entry = (reiserfs_acl_entry *) e;
e                 159 fs/reiserfs/xattr_acl.c 			e += sizeof(reiserfs_acl_entry);
e                 164 fs/reiserfs/xattr_acl.c 			e += sizeof(reiserfs_acl_entry);
e                 171 fs/reiserfs/xattr_acl.c 			e += sizeof(reiserfs_acl_entry_short);
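fs/jffs2/acl.c and fs/reiserfs/xattr_acl.c walk a packed, variable-length on-disk ACL with a byte cursor e, stepping by the full entry size for USER/GROUP tags and by the short size otherwise. A hedged sketch of that cursor walk; the two record layouts here are illustrative, not the real on-disk structs:

#include <stddef.h>
#include <stdint.h>

/* Illustrative record layouts only: USER/GROUP entries carry an id,
 * the rest are header-only. */
struct acl_short { uint16_t tag; uint16_t perm; };
struct acl_full  { uint16_t tag; uint16_t perm; uint32_t id; };

enum { TAG_USER = 1, TAG_GROUP = 2, TAG_OTHER = 3 };

/* Count entries in a packed buffer; -1 on a truncated record, much
 * as the real parsers bail out with -EINVAL. */
static int count_acl_entries(const void *buf, size_t size)
{
        const char *e = buf, *end = (const char *)buf + size;
        int n = 0;

        while (e < end) {
                const struct acl_short *entry;

                if (e + sizeof(*entry) > end)
                        return -1;              /* truncated header */
                entry = (const void *)e;

                switch (entry->tag) {
                case TAG_USER:
                case TAG_GROUP:
                        e += sizeof(struct acl_full);
                        break;
                default:
                        e += sizeof(struct acl_short);
                        break;
                }
                if (e > end)
                        return -1;              /* truncated body */
                n++;
        }
        return n;
}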
e                1250 fs/ubifs/recovery.c 	struct size_entry *e;
e                1254 fs/ubifs/recovery.c 		e = rb_entry(parent, struct size_entry, rb);
e                1255 fs/ubifs/recovery.c 		if (inum < e->inum)
e                1261 fs/ubifs/recovery.c 	e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
e                1262 fs/ubifs/recovery.c 	if (!e)
e                1265 fs/ubifs/recovery.c 	e->inum = inum;
e                1266 fs/ubifs/recovery.c 	e->i_size = i_size;
e                1267 fs/ubifs/recovery.c 	e->d_size = d_size;
e                1268 fs/ubifs/recovery.c 	e->exists = exists;
e                1270 fs/ubifs/recovery.c 	rb_link_node(&e->rb, parent, p);
e                1271 fs/ubifs/recovery.c 	rb_insert_color(&e->rb, &c->size_tree);
e                1284 fs/ubifs/recovery.c 	struct size_entry *e;
e                1287 fs/ubifs/recovery.c 		e = rb_entry(p, struct size_entry, rb);
e                1288 fs/ubifs/recovery.c 		if (inum < e->inum)
e                1290 fs/ubifs/recovery.c 		else if (inum > e->inum)
e                1293 fs/ubifs/recovery.c 			return e;
e                1305 fs/ubifs/recovery.c 	struct size_entry *e = find_ino(c, inum);
e                1307 fs/ubifs/recovery.c 	if (!e)
e                1309 fs/ubifs/recovery.c 	rb_erase(&e->rb, &c->size_tree);
e                1310 fs/ubifs/recovery.c 	kfree(e);
e                1319 fs/ubifs/recovery.c 	struct size_entry *e, *n;
e                1321 fs/ubifs/recovery.c 	rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) {
e                1322 fs/ubifs/recovery.c 		iput(e->inode);
e                1323 fs/ubifs/recovery.c 		kfree(e);
e                1358 fs/ubifs/recovery.c 	struct size_entry *e;
e                1366 fs/ubifs/recovery.c 			e = find_ino(c, inum);
e                1367 fs/ubifs/recovery.c 			if (e) {
e                1368 fs/ubifs/recovery.c 				e->i_size = new_size;
e                1369 fs/ubifs/recovery.c 				e->exists = 1;
e                1378 fs/ubifs/recovery.c 		e = find_ino(c, inum);
e                1379 fs/ubifs/recovery.c 		if (e) {
e                1380 fs/ubifs/recovery.c 			if (new_size > e->d_size)
e                1381 fs/ubifs/recovery.c 				e->d_size = new_size;
e                1389 fs/ubifs/recovery.c 		e = find_ino(c, inum);
e                1390 fs/ubifs/recovery.c 		if (e)
e                1391 fs/ubifs/recovery.c 			e->d_size = new_size;
e                1402 fs/ubifs/recovery.c static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
e                1412 fs/ubifs/recovery.c 	ino_key_init(c, &key, e->inum);
e                1421 fs/ubifs/recovery.c 	if (i_size >= e->d_size)
e                1429 fs/ubifs/recovery.c 	ino->size = cpu_to_le64(e->d_size);
e                1444 fs/ubifs/recovery.c 		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
e                1449 fs/ubifs/recovery.c 		   (unsigned long)e->inum, e->i_size, e->d_size, err);
e                1458 fs/ubifs/recovery.c static int inode_fix_size(struct ubifs_info *c, struct size_entry *e)
e                1465 fs/ubifs/recovery.c 		ubifs_assert(c, !e->inode);
e                1467 fs/ubifs/recovery.c 	if (e->inode) {
e                1469 fs/ubifs/recovery.c 		inode = e->inode;
e                1471 fs/ubifs/recovery.c 		inode = ubifs_iget(c->vfs_sb, e->inum);
e                1475 fs/ubifs/recovery.c 		if (inode->i_size >= e->d_size) {
e                1485 fs/ubifs/recovery.c 			  (unsigned long)e->inum,
e                1486 fs/ubifs/recovery.c 			  inode->i_size, e->d_size);
e                1490 fs/ubifs/recovery.c 		inode->i_size = e->d_size;
e                1491 fs/ubifs/recovery.c 		ui->ui_size = e->d_size;
e                1492 fs/ubifs/recovery.c 		ui->synced_i_size = e->d_size;
e                1494 fs/ubifs/recovery.c 		e->inode = inode;
e                1512 fs/ubifs/recovery.c 	rb_erase(&e->rb, &c->size_tree);
e                1513 fs/ubifs/recovery.c 	kfree(e);
e                1533 fs/ubifs/recovery.c 		struct size_entry *e;
e                1536 fs/ubifs/recovery.c 		e = rb_entry(this, struct size_entry, rb);
e                1540 fs/ubifs/recovery.c 		if (!e->exists) {
e                1543 fs/ubifs/recovery.c 			ino_key_init(c, &key, e->inum);
e                1550 fs/ubifs/recovery.c 					  (unsigned long)e->inum);
e                1551 fs/ubifs/recovery.c 				err = ubifs_tnc_remove_ino(c, e->inum);
e                1557 fs/ubifs/recovery.c 				e->exists = 1;
e                1558 fs/ubifs/recovery.c 				e->i_size = le64_to_cpu(ino->size);
e                1562 fs/ubifs/recovery.c 		if (e->exists && e->i_size < e->d_size) {
e                1571 fs/ubifs/recovery.c 				err = fix_size_in_place(c, e);
e                1574 fs/ubifs/recovery.c 				iput(e->inode);
e                1576 fs/ubifs/recovery.c 				err = inode_fix_size(c, e);
e                1583 fs/ubifs/recovery.c 		rb_erase(&e->rb, &c->size_tree);
e                1584 fs/ubifs/recovery.c 		kfree(e);
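The ubifs recovery entries show the canonical <linux/rbtree.h> usage: descend from the root comparing inum, remember parent and the link slot p, then splice the new node in with rb_link_node() plus rb_insert_color(). A condensed kernel-style sketch of that insert walk, with the fields trimmed to the ones used above and error handling shortened:

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct size_entry {
        struct rb_node rb;
        ino_t inum;
        loff_t d_size;
};

/* Insert walk in the style of add_ino() above: find the slot, link,
 * then let the rbtree core rebalance. */
static int size_tree_add(struct rb_root *root, ino_t inum, loff_t d_size)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        struct size_entry *e;

        while (*p) {
                parent = *p;
                e = rb_entry(parent, struct size_entry, rb);
                if (inum < e->inum)
                        p = &(*p)->rb_left;
                else if (inum > e->inum)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;         /* already tracked */
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;
        e->inum = inum;
        e->d_size = d_size;
        rb_link_node(&e->rb, parent, p);        /* hang off the slot */
        rb_insert_color(&e->rb, root);          /* recolor/rebalance */
        return 0;
}

The find_ino() lines above are the read side of the same walk, descending left or right on the inum comparison until rb_entry() yields a match.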
e                 902 fs/xfs/xfs_bmap_util.c 		xfs_fileoff_t	s, e;
e                 911 fs/xfs/xfs_bmap_util.c 			e = startoffset_fsb + allocatesize_fsb;
e                 914 fs/xfs/xfs_bmap_util.c 				e += temp;
e                 915 fs/xfs/xfs_bmap_util.c 			div_u64_rem(e, extsz, &temp);
e                 917 fs/xfs/xfs_bmap_util.c 				e += extsz - temp;
e                 920 fs/xfs/xfs_bmap_util.c 			e = allocatesize_fsb;
e                 930 fs/xfs/xfs_bmap_util.c 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
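The xfs_bmap_util.c fragment aligns an allocation range to the extent size with div_u64_rem(): the start is rounded down to a multiple of extsz and the end rounded up. A simplified userspace version of that arithmetic (plain % in place of div_u64_rem()):

#include <stdint.h>
#include <stdio.h>

/* Round [s, e) outward to multiples of extsz. */
static void align_range(uint64_t *s, uint64_t *e, uint64_t extsz)
{
        uint64_t rem;

        rem = *s % extsz;
        if (rem)
                *s -= rem;              /* round start down */
        rem = *e % extsz;
        if (rem)
                *e += extsz - rem;      /* round end up */
}

int main(void)
{
        uint64_t s = 10, e = 25;

        align_range(&s, &e, 8);
        printf("[%llu, %llu)\n", (unsigned long long)s,
               (unsigned long long)e);  /* [8, 32) */
        return 0;
}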
e                  27 fs/xfs/xfs_error.h #define	XFS_ERROR_REPORT(e, lvl, mp)	\
e                  28 fs/xfs/xfs_error.h 	xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
e                  29 fs/xfs/xfs_error.h #define	XFS_CORRUPTION_ERROR(e, lvl, mp, buf, bufsize)	\
e                  30 fs/xfs/xfs_error.h 	xfs_corruption_error(e, lvl, mp, buf, bufsize, \
e                  33 include/crypto/internal/rsa.h 	const u8 *e;
e                 325 include/drm/drm_edid.h #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
e                 380 include/drm/drm_file.h 				  struct drm_event *e);
e                 384 include/drm/drm_file.h 			   struct drm_event *e);
e                 387 include/drm/drm_file.h void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
e                 388 include/drm/drm_file.h void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
e                 202 include/drm/drm_vblank.h 			       struct drm_pending_vblank_event *e);
e                 204 include/drm/drm_vblank.h 			      struct drm_pending_vblank_event *e);
e                 205 include/drm/drm_vblank.h void drm_vblank_set_event(struct drm_pending_vblank_event *e,
e                 247 include/linux/amba/clcd.h #define CHECK(e,l,h) (var->e < l || var->e > h)
e                 270 include/linux/amba/clcd.h #define CHECK(e) (var->e != fb->fb.var.e)
e                  32 include/linux/average.h 	static inline void ewma_##name##_init(struct ewma_##name *e)	\
e                  42 include/linux/average.h 		e->internal = 0;					\
e                  45 include/linux/average.h 	ewma_##name##_read(struct ewma_##name *e)			\
e                  51 include/linux/average.h 		return e->internal >> (_precision);			\
e                  53 include/linux/average.h 	static inline void ewma_##name##_add(struct ewma_##name *e,	\
e                  56 include/linux/average.h 		unsigned long internal = READ_ONCE(e->internal);	\
e                  65 include/linux/average.h 		WRITE_ONCE(e->internal, internal ?			\
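include/linux/average.h generates per-name fixed-point EWMA helpers: the accumulator carries _precision fractional bits, _read shifts them off, and _add folds in a sample with weight 1/2^weight_rcp, which is exactly the "internal ? ... : (val << precision)" expression in the excerpt. A plain-C sketch with the two parameters fixed; the names are illustrative, not the generated ewma_##name##_* ones:

#include <stdio.h>

#define EWMA_PRECISION  8       /* fractional bits kept in ->internal */
#define EWMA_WEIGHT_RCP 4       /* each sample weighs 1/2^4 = 1/16 */

struct ewma {
        unsigned long internal;
};

/* new = old + (sample - old)/16, in fixed point; the first sample
 * seeds the average, matching the ternary in the excerpt. */
static void ewma_add(struct ewma *e, unsigned long val)
{
        unsigned long internal = e->internal;

        e->internal = internal ?
                (((internal << EWMA_WEIGHT_RCP) - internal) +
                 (val << EWMA_PRECISION)) >> EWMA_WEIGHT_RCP :
                (val << EWMA_PRECISION);
}

static unsigned long ewma_read(const struct ewma *e)
{
        return e->internal >> EWMA_PRECISION;   /* drop the fraction */
}

int main(void)
{
        struct ewma e = { 0 };
        int i;

        for (i = 0; i < 32; i++)
                ewma_add(&e, 100);
        printf("%lu\n", ewma_read(&e));         /* converges on 100 */
        return 0;
}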
e                   8 include/linux/build_bug.h #define BUILD_BUG_ON_ZERO(e) (0)
e                  16 include/linux/build_bug.h #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
e                  30 include/linux/build_bug.h #define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
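The two BUILD_BUG_ON_ZERO definitions above are the stub and the real variant: sizeof(struct { int:(-!!(e)); }) declares an unnamed bitfield whose width is -1 when e is nonzero, a hard compile error, and 0 when e is zero, a harmless constant expression. A minimal demonstration (the empty struct builds as a GNU C extension, as in the kernel; the macro is renamed to mark it as a local copy):

#include <stdio.h>

/* Nonzero e gives a bitfield of width -1: refuses to compile.
 * Zero e gives width 0: the expression is just a constant. */
#define MY_BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int:(-!!(e)); }))

int main(void)
{
        int ok = MY_BUILD_BUG_ON_ZERO(sizeof(int) < 2); /* fine: 0 */
        /* int bad = MY_BUILD_BUG_ON_ZERO(1); */        /* build error */

        printf("%d\n", ok);
        return 0;
}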
e                  22 include/linux/crush/hash.h 			    __u32 e);
e                 163 include/linux/intel-iommu.h #define ecap_smpwc(e)		(((e) >> 48) & 0x1)
e                 164 include/linux/intel-iommu.h #define ecap_flts(e)		(((e) >> 47) & 0x1)
e                 165 include/linux/intel-iommu.h #define ecap_slts(e)		(((e) >> 46) & 0x1)
e                 166 include/linux/intel-iommu.h #define ecap_smts(e)		(((e) >> 43) & 0x1)
e                 167 include/linux/intel-iommu.h #define ecap_dit(e)		((e >> 41) & 0x1)
e                 168 include/linux/intel-iommu.h #define ecap_pasid(e)		((e >> 40) & 0x1)
e                 169 include/linux/intel-iommu.h #define ecap_pss(e)		((e >> 35) & 0x1f)
e                 170 include/linux/intel-iommu.h #define ecap_eafs(e)		((e >> 34) & 0x1)
e                 171 include/linux/intel-iommu.h #define ecap_nwfs(e)		((e >> 33) & 0x1)
e                 172 include/linux/intel-iommu.h #define ecap_srs(e)		((e >> 31) & 0x1)
e                 173 include/linux/intel-iommu.h #define ecap_ers(e)		((e >> 30) & 0x1)
e                 174 include/linux/intel-iommu.h #define ecap_prs(e)		((e >> 29) & 0x1)
e                 175 include/linux/intel-iommu.h #define ecap_broken_pasid(e)	((e >> 28) & 0x1)
e                 176 include/linux/intel-iommu.h #define ecap_dis(e)		((e >> 27) & 0x1)
e                 177 include/linux/intel-iommu.h #define ecap_nest(e)		((e >> 26) & 0x1)
e                 178 include/linux/intel-iommu.h #define ecap_mts(e)		((e >> 25) & 0x1)
e                 179 include/linux/intel-iommu.h #define ecap_ecs(e)		((e >> 24) & 0x1)
e                 180 include/linux/intel-iommu.h #define ecap_iotlb_offset(e) 	((((e) >> 8) & 0x3ff) * 16)
e                 181 include/linux/intel-iommu.h #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
e                 182 include/linux/intel-iommu.h #define ecap_coherent(e)	((e) & 0x1)
e                 183 include/linux/intel-iommu.h #define ecap_qis(e)		((e) & 0x2)
e                 184 include/linux/intel-iommu.h #define ecap_pass_through(e)	((e >> 6) & 0x1)
e                 185 include/linux/intel-iommu.h #define ecap_eim_support(e)	((e >> 4) & 0x1)
e                 186 include/linux/intel-iommu.h #define ecap_ir_support(e)	((e >> 3) & 0x1)
e                 187 include/linux/intel-iommu.h #define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
e                 188 include/linux/intel-iommu.h #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
e                 189 include/linux/intel-iommu.h #define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
e                 381 include/linux/kvm_host.h 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
e                 990 include/linux/kvm_host.h int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
e                1144 include/linux/kvm_host.h 			  struct kvm_kernel_irq_routing_entry *e,
e                 251 include/linux/lru_cache.h extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
e                 300 include/linux/lru_cache.h extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
e                8548 include/linux/mlx5/mlx5_ifc.h 	u8         e[0x1];
e                8581 include/linux/mlx5/mlx5_ifc.h 	u8         e[0x2];
e                8915 include/linux/mlx5/mlx5_ifc.h 	u8         e[0x2];
e                9613 include/linux/mlx5/mlx5_ifc.h 	u8         e[0x1];
e                 103 include/linux/netdevice.h #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
e                 104 include/linux/netdevice.h #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
e                 121 include/linux/netfilter.h static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
e                 123 include/linux/netfilter.h 	unsigned int n = e->num_hook_entries;
e                 126 include/linux/netfilter.h 	hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */
e                 200 include/linux/netfilter.h 		 const struct nf_hook_entries *e, unsigned int i);
e                 129 include/linux/netfilter/ipset/ip_set.h #define ext_timeout(e, s)	\
e                 130 include/linux/netfilter/ipset/ip_set.h ((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
e                 131 include/linux/netfilter/ipset/ip_set.h #define ext_counter(e, s)	\
e                 132 include/linux/netfilter/ipset/ip_set.h ((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
e                 133 include/linux/netfilter/ipset/ip_set.h #define ext_comment(e, s)	\
e                 134 include/linux/netfilter/ipset/ip_set.h ((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
e                 135 include/linux/netfilter/ipset/ip_set.h #define ext_skbinfo(e, s)	\
e                 136 include/linux/netfilter/ipset/ip_set.h ((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
e                 349 include/linux/netfilter/ipset/ip_set.h 				 const void *e, bool active);
e                  74 include/linux/netfilter_arp/arp_tables.h compat_arpt_get_target(struct compat_arpt_entry *e)
e                  76 include/linux/netfilter_arp/arp_tables.h 	return (void *)e + e->target_offset;
e                  75 include/linux/netfilter_bridge/ebtables.h 	struct ebt_entry *e; /* pointer to entry data */
e                  21 include/linux/netfilter_ingress.h 	struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
e                  28 include/linux/netfilter_ingress.h 	if (unlikely(!e))
e                  34 include/linux/netfilter_ingress.h 	ret = nf_hook_slow(skb, &state, e, 0);
e                  84 include/linux/netfilter_ipv4/ip_tables.h compat_ipt_get_target(struct compat_ipt_entry *e)
e                  86 include/linux/netfilter_ipv4/ip_tables.h 	return (void *)e + e->target_offset;
e                  50 include/linux/netfilter_ipv6/ip6_tables.h compat_ip6t_get_target(struct compat_ip6t_entry *e)
e                  52 include/linux/netfilter_ipv6/ip6_tables.h 	return (void *)e + e->target_offset;
e                 431 include/linux/suspend.h extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
e                 432 include/linux/suspend.h static inline void __init register_nosave_region(unsigned long b, unsigned long e)
e                 434 include/linux/suspend.h 	__register_nosave_region(b, e, 0);
e                 436 include/linux/suspend.h static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
e                 438 include/linux/suspend.h 	__register_nosave_region(b, e, 1);
e                 455 include/linux/suspend.h static inline void register_nosave_region(unsigned long b, unsigned long e) {}
e                 456 include/linux/suspend.h static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
e                 513 include/linux/swap.h #define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
e                 514 include/linux/swap.h #define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
e                 371 include/linux/uaccess.h #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
e                 372 include/linux/uaccess.h #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
e                 373 include/linux/uaccess.h #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
e                 417 include/media/v4l2-ioctl.h 			     struct v4l2_exportbuffer *e);
e                 651 include/net/bluetooth/hci_core.h static inline long inquiry_entry_age(struct inquiry_entry *e)
e                 653 include/net/bluetooth/hci_core.h 	return jiffies - e->timestamp;
e                 440 include/net/dsa.h 			       struct ethtool_eee *e);
e                 442 include/net/dsa.h 			       struct ethtool_eee *e);
e                  12 include/net/fou.h size_t fou_encap_hlen(struct ip_tunnel_encap *e);
e                  13 include/net/fou.h size_t gue_encap_hlen(struct ip_tunnel_encap *e);
e                  15 include/net/fou.h int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  17 include/net/fou.h int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  69 include/net/ip6_tunnel.h 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
e                  70 include/net/ip6_tunnel.h 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  88 include/net/ip6_tunnel.h static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
e                  93 include/net/ip6_tunnel.h 	if (e->type == TUNNEL_ENCAP_NONE)
e                  96 include/net/ip6_tunnel.h 	if (e->type >= MAX_IPTUN_ENCAP_OPS)
e                 100 include/net/ip6_tunnel.h 	ops = rcu_dereference(ip6tun_encaps[e->type]);
e                 102 include/net/ip6_tunnel.h 		hlen = ops->encap_hlen(e);
e                 293 include/net/ip_tunnels.h 	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
e                 294 include/net/ip_tunnels.h 	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                 332 include/net/ip_tunnels.h static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
e                 337 include/net/ip_tunnels.h 	if (e->type == TUNNEL_ENCAP_NONE)
e                 340 include/net/ip_tunnels.h 	if (e->type >= MAX_IPTUN_ENCAP_OPS)
e                 344 include/net/ip_tunnels.h 	ops = rcu_dereference(iptun_encaps[e->type]);
e                 346 include/net/ip_tunnels.h 		hlen = ops->encap_hlen(e);
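include/net/ip_tunnels.h and ip6_tunnel.h resolve the header-length callback through an RCU-protected ops array indexed by e->type and bounded by MAX_IPTUN_ENCAP_OPS. A kernel-style sketch of that dispatch; my_encaps[] and the types are hypothetical stand-ins for iptun_encaps[], and the TUNNEL_ENCAP_NONE short-circuit is trimmed:

#include <linux/errno.h>
#include <linux/rcupdate.h>

#define MAX_ENCAP_OPS 8

struct encap { unsigned int type; };

struct encap_ops {
        size_t (*encap_hlen)(struct encap *e);
};

static const struct encap_ops __rcu *my_encaps[MAX_ENCAP_OPS];

static int my_encap_hlen(struct encap *e)
{
        const struct encap_ops *ops;
        int hlen = -EINVAL;

        if (e->type >= MAX_ENCAP_OPS)   /* bound the table index */
                return -EINVAL;

        rcu_read_lock();
        ops = rcu_dereference(my_encaps[e->type]);
        if (ops && ops->encap_hlen)
                hlen = ops->encap_hlen(e);
        rcu_read_unlock();
        return hlen;
}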
e                  46 include/net/netfilter/nf_conntrack_ecache.h 	struct nf_conntrack_ecache *e;
e                  55 include/net/netfilter/nf_conntrack_ecache.h 	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
e                  56 include/net/netfilter/nf_conntrack_ecache.h 	if (e) {
e                  57 include/net/netfilter/nf_conntrack_ecache.h 		e->ctmask  = ctmask;
e                  58 include/net/netfilter/nf_conntrack_ecache.h 		e->expmask = expmask;
e                  60 include/net/netfilter/nf_conntrack_ecache.h 	return e;
e                 109 include/net/netfilter/nf_conntrack_ecache.h 	struct nf_conntrack_ecache *e;
e                 114 include/net/netfilter/nf_conntrack_ecache.h 	e = nf_ct_ecache_find(ct);
e                 115 include/net/netfilter/nf_conntrack_ecache.h 	if (e == NULL)
e                 118 include/net/netfilter/nf_conntrack_ecache.h 	set_bit(event, &e->cache);
e                 182 include/net/netfilter/nf_conntrack_ecache.h static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
e                 116 include/net/netfilter/nf_conntrack_expect.h void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data);
e                 118 include/net/netfilter/nf_conntrack_expect.h 			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
e                 776 include/net/sch_generic.h #define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
e                 778 include/net/sch_generic.h #define net_xmit_drop_count(e)	(1)
e                 430 include/soc/fsl/qman.h 	u32 e = 0;
e                 439 include/soc/fsl/qman.h 		e++;
e                 445 include/soc/fsl/qman.h 	td |= (e & QM_FQD_TD_EXP_MASK);
e                 587 include/soc/fsl/qman.h 	u32 e = 0;
e                 593 include/soc/fsl/qman.h 		e++;
e                 597 include/soc/fsl/qman.h 	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
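Both qman.h fragments encode a value as mantissa * 2^e: shift the value right until the mantissa fits its field, counting shifts in e (the second caps the exponent at five bits with "e & 0x1f"). A standalone version of that loop; the truncating shift is a simplification chosen for the sketch:

#include <stdint.h>
#include <stdio.h>

/* Encode val as mant * 2^exp with mant <= mant_max; the 0x1f cap
 * mirrors the (e & 0x1f) field in the excerpt. */
static int encode_mant_exp(uint64_t val, uint64_t mant_max,
                           unsigned *mant, unsigned *exp)
{
        unsigned e = 0;

        while (val > mant_max) {
                val >>= 1;
                e++;
        }
        if (e > 0x1f)
                return -1;      /* exponent field is 5 bits here */
        *mant = (unsigned)val;
        *exp = e;
        return 0;
}

int main(void)
{
        unsigned m, x;

        if (encode_mant_exp(1000000, 0xff, &m, &x) == 0)
                printf("%u * 2^%u ~ %llu\n", m, x,
                       (unsigned long long)m << x);     /* 244 * 2^12 */
        return 0;
}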
e                  15 include/sound/emu8000_reg.h #define EMU8000_DATA0(e)    ((e)->port1)
e                  16 include/sound/emu8000_reg.h #define EMU8000_DATA1(e)    ((e)->port2)
e                  17 include/sound/emu8000_reg.h #define EMU8000_DATA2(e)    ((e)->port2+2)
e                  18 include/sound/emu8000_reg.h #define EMU8000_DATA3(e)    ((e)->port3)
e                  19 include/sound/emu8000_reg.h #define EMU8000_PTR(e)      ((e)->port3+2)
e                 360 include/sound/soc-dapm.h #define SND_SOC_DAPM_EVENT_ON(e)	\
e                 361 include/sound/soc-dapm.h 	(e & (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU))
e                 362 include/sound/soc-dapm.h #define SND_SOC_DAPM_EVENT_OFF(e)	\
e                 363 include/sound/soc-dapm.h 	(e & (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD))
e                 444 include/sound/soc-dapm.h 		struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
e                1236 include/sound/soc.h static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e,
e                1241 include/sound/soc.h 	if (!e->values)
e                1244 include/sound/soc.h 	for (i = 0; i < e->items; i++)
e                1245 include/sound/soc.h 		if (val == e->values[i])
e                1251 include/sound/soc.h static inline unsigned int snd_soc_enum_item_to_val(struct soc_enum *e,
e                1254 include/sound/soc.h 	if (!e->values)
e                1257 include/sound/soc.h 	return e->values[item];
e                  99 include/trace/events/kvm.h 	    TP_PROTO(__u64 e, int pin, bool coalesced),
e                 100 include/trace/events/kvm.h 	    TP_ARGS(e, pin, coalesced),
e                 103 include/trace/events/kvm.h 		__field(	__u64,		e		)
e                 109 include/trace/events/kvm.h 		__entry->e		= e;
e                 115 include/trace/events/kvm.h 		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
e                 116 include/trace/events/kvm.h 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
e                 117 include/trace/events/kvm.h 		  (__entry->e & (1<<11)) ? "logical" : "physical",
e                 118 include/trace/events/kvm.h 		  (__entry->e & (1<<15)) ? "level" : "edge",
e                 119 include/trace/events/kvm.h 		  (__entry->e & (1<<16)) ? "|masked" : "",
e                 124 include/trace/events/kvm.h 	    TP_PROTO(__u64 e),
e                 125 include/trace/events/kvm.h 	    TP_ARGS(e),
e                 128 include/trace/events/kvm.h 		__field(	__u64,		e		)
e                 132 include/trace/events/kvm.h 		__entry->e		= e;
e                 136 include/trace/events/kvm.h 		  (u8)(__entry->e >> 56), (u8)__entry->e,
e                 137 include/trace/events/kvm.h 		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
e                 138 include/trace/events/kvm.h 		  (__entry->e & (1<<11)) ? "logical" : "physical",
e                 139 include/trace/events/kvm.h 		  (__entry->e & (1<<15)) ? "level" : "edge",
e                 140 include/trace/events/kvm.h 		  (__entry->e & (1<<16)) ? "|masked" : "")
e                  95 include/uapi/linux/cn_proc.h 			} e;
e                 250 include/uapi/linux/coff.h     } e;
e                 251 include/uapi/linux/coff.h   } e;
e                  94 include/uapi/linux/lightnvm.h 		struct nvm_ioctl_create_extended e;
e                  98 include/uapi/linux/map_to_7segment.h #define _SEG7(l,a,b,c,d,e,f,g)	\
e                 100 include/uapi/linux/map_to_7segment.h 	e<<BIT_SEG7_E |	f<<BIT_SEG7_F |	g<<BIT_SEG7_G )
e                 129 include/uapi/linux/netfilter/x_tables.h #define XT_MATCH_ITERATE(type, e, fn, args...)			\
e                 136 include/uapi/linux/netfilter/x_tables.h 	     __i < (e)->target_offset;				\
e                 138 include/uapi/linux/netfilter/x_tables.h 		__m = (void *)e + __i;				\
e                 200 include/uapi/linux/netfilter_arp/arp_tables.h static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e)
e                 202 include/uapi/linux/netfilter_arp/arp_tables.h 	return (void *)e + e->target_offset;
e                 195 include/uapi/linux/netfilter_bridge/ebtables.h ebt_get_target(struct ebt_entry *e)
e                 197 include/uapi/linux/netfilter_bridge/ebtables.h 	return (void *)e + e->target_offset;
e                 216 include/uapi/linux/netfilter_bridge/ebtables.h #define EBT_MATCH_ITERATE(e, fn, args...)                   \
e                 223 include/uapi/linux/netfilter_bridge/ebtables.h 	     __i < (e)->watchers_offset;                    \
e                 226 include/uapi/linux/netfilter_bridge/ebtables.h 		__match = (void *)(e) + __i;                \
e                 233 include/uapi/linux/netfilter_bridge/ebtables.h 		if (__i != (e)->watchers_offset)            \
e                 239 include/uapi/linux/netfilter_bridge/ebtables.h #define EBT_WATCHER_ITERATE(e, fn, args...)                 \
e                 245 include/uapi/linux/netfilter_bridge/ebtables.h 	for (__i = e->watchers_offset;                      \
e                 246 include/uapi/linux/netfilter_bridge/ebtables.h 	     __i < (e)->target_offset;                      \
e                 249 include/uapi/linux/netfilter_bridge/ebtables.h 		__watcher = (void *)(e) + __i;              \
e                 256 include/uapi/linux/netfilter_bridge/ebtables.h 		if (__i != (e)->target_offset)              \
e                  62 include/uapi/linux/netfilter_ipv4/ip_tables.h #define IPT_MATCH_ITERATE(e, fn, args...) \
e                  63 include/uapi/linux/netfilter_ipv4/ip_tables.h 	XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args)
e                 223 include/uapi/linux/netfilter_ipv4/ip_tables.h ipt_get_target(struct ipt_entry *e)
e                 225 include/uapi/linux/netfilter_ipv4/ip_tables.h 	return (void *)e + e->target_offset;
e                  57 include/uapi/linux/netfilter_ipv6/ip6_tables.h #define IP6T_MATCH_ITERATE(e, fn, args...) \
e                  58 include/uapi/linux/netfilter_ipv6/ip6_tables.h 	XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args)
e                 263 include/uapi/linux/netfilter_ipv6/ip6_tables.h ip6t_get_target(struct ip6t_entry *e)
e                 265 include/uapi/linux/netfilter_ipv6/ip6_tables.h 	return (void *)e + e->target_offset;
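Every *_get_target() helper above computes "(void *)e + e->target_offset": each entry header records the byte offset of its own trailing target record, which sits after a variable-length run of matches. A compact userspace sketch of that layout trick, with invented entry/target structs rather than the real ipt_entry:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented layout: the header records where its trailing target
 * begins, after a variable run of match data. */
struct entry {
        uint16_t target_offset;
        uint16_t next_offset;
        char data[];
};

struct target { int verdict; };

static struct target *entry_get_target(struct entry *e)
{
        return (struct target *)((char *)e + e->target_offset);
}

int main(void)
{
        size_t tgt_off = sizeof(struct entry) + 8;      /* 8 match bytes */
        struct entry *e = calloc(1, tgt_off + sizeof(struct target));

        if (!e)
                return 1;
        e->target_offset = tgt_off;
        e->next_offset = tgt_off + sizeof(struct target);
        entry_get_target(e)->verdict = -1;
        printf("verdict %d at offset %u\n",
               entry_get_target(e)->verdict, (unsigned)e->target_offset);
        free(e);
        return 0;
}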
e                 702 include/uapi/linux/wireless.h 	__s16		e;		/* Exponent */
e                 560 include/video/pm3fb.h 	#define PM3FBDestReadEnables_E(e)		((e) & 0xff)
e                 327 kernel/audit_watch.c 	struct audit_entry *e;
e                 332 kernel/audit_watch.c 			e = container_of(r, struct audit_entry, rule);
e                 334 kernel/audit_watch.c 			if (e->rule.exe)
e                 335 kernel/audit_watch.c 				audit_remove_mark(e->rule.exe);
e                 338 kernel/audit_watch.c 			list_del_rcu(&e->list);
e                 339 kernel/audit_watch.c 			call_rcu(&e->rcu, audit_free_rule_rcu);
e                  81 kernel/auditfilter.c static inline void audit_free_rule(struct audit_entry *e)
e                  84 kernel/auditfilter.c 	struct audit_krule *erule = &e->rule;
e                  94 kernel/auditfilter.c 	kfree(e);
e                  99 kernel/auditfilter.c 	struct audit_entry *e = container_of(head, struct audit_entry, rcu);
e                 100 kernel/auditfilter.c 	audit_free_rule(e);
e                 894 kernel/auditfilter.c 	struct audit_entry *e, *found = NULL;
e                 905 kernel/auditfilter.c 			list_for_each_entry(e, list, list)
e                 906 kernel/auditfilter.c 				if (!audit_compare_rule(&entry->rule, &e->rule)) {
e                 907 kernel/auditfilter.c 					found = e;
e                 916 kernel/auditfilter.c 	list_for_each_entry(e, list, list)
e                 917 kernel/auditfilter.c 		if (!audit_compare_rule(&entry->rule, &e->rule)) {
e                 918 kernel/auditfilter.c 			found = e;
e                 932 kernel/auditfilter.c 	struct audit_entry *e;
e                 950 kernel/auditfilter.c 	e = audit_find_rule(entry, &list);
e                 951 kernel/auditfilter.c 	if (e) {
e                1015 kernel/auditfilter.c 	struct audit_entry  *e;
e                1032 kernel/auditfilter.c 	e = audit_find_rule(entry, &list);
e                1033 kernel/auditfilter.c 	if (!e) {
e                1038 kernel/auditfilter.c 	if (e->rule.watch)
e                1039 kernel/auditfilter.c 		audit_remove_watch_rule(&e->rule);
e                1041 kernel/auditfilter.c 	if (e->rule.tree)
e                1042 kernel/auditfilter.c 		audit_remove_tree_rule(&e->rule);
e                1044 kernel/auditfilter.c 	if (e->rule.exe)
e                1045 kernel/auditfilter.c 		audit_remove_mark_rule(&e->rule);
e                1055 kernel/auditfilter.c 	list_del_rcu(&e->list);
e                1056 kernel/auditfilter.c 	list_del(&e->rule.list);
e                1057 kernel/auditfilter.c 	call_rcu(&e->rcu, audit_free_rule_rcu);
e                1324 kernel/auditfilter.c 	struct audit_entry *e;
e                1328 kernel/auditfilter.c 	list_for_each_entry_rcu(e, &audit_filter_list[listtype], list) {
e                1331 kernel/auditfilter.c 		for (i = 0; i < e->rule.field_count; i++) {
e                1332 kernel/auditfilter.c 			struct audit_field *f = &e->rule.fields[i];
e                1370 kernel/auditfilter.c 				result = audit_exe_compare(current, e->rule.exe);
e                1383 kernel/auditfilter.c 			if (e->rule.action == AUDIT_NEVER || listtype == AUDIT_FILTER_EXCLUDE)
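The auditfilter lines pair the two halves of RCU list management: readers traverse with list_for_each_entry_rcu(), and deletion unlinks with list_del_rcu() and then defers the free through call_rcu(), so an in-flight reader never sees freed memory. A kernel-style sketch of that pattern; struct rule and rule_list are illustrative, not the audit structures:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct rule {
        struct list_head list;
        struct rcu_head rcu;
        int key;
};

static LIST_HEAD(rule_list);    /* writers serialized externally */

static void rule_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct rule, rcu));
}

static void rule_del(struct rule *e)
{
        list_del_rcu(&e->list);           /* readers may still hold e... */
        call_rcu(&e->rcu, rule_free_rcu); /* ...so free after a grace period */
}

/* Caller must hold rcu_read_lock(). */
static struct rule *rule_find(int key)
{
        struct rule *e;

        list_for_each_entry_rcu(e, &rule_list, list)
                if (e->key == key)
                        return e;
        return NULL;
}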
e                 742 kernel/auditsc.c 	struct audit_entry *e;
e                 746 kernel/auditsc.c 	list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
e                 747 kernel/auditsc.c 		if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
e                 750 kernel/auditsc.c 				*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
e                 784 kernel/auditsc.c 	struct audit_entry *e;
e                 791 kernel/auditsc.c 	list_for_each_entry_rcu(e, list, list) {
e                 792 kernel/auditsc.c 		if (audit_in_mask(&e->rule, ctx->major) &&
e                 793 kernel/auditsc.c 		    audit_filter_rules(tsk, &e->rule, ctx, NULL,
e                 813 kernel/auditsc.c 	struct audit_entry *e;
e                 816 kernel/auditsc.c 	list_for_each_entry_rcu(e, list, list) {
e                 817 kernel/auditsc.c 		if (audit_in_mask(&e->rule, ctx->major) &&
e                 818 kernel/auditsc.c 		    audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
e                1951 kernel/auditsc.c 	struct audit_entry *e;
e                1959 kernel/auditsc.c 	list_for_each_entry_rcu(e, list, list) {
e                1960 kernel/auditsc.c 		for (i = 0; i < e->rule.field_count; i++) {
e                1961 kernel/auditsc.c 			struct audit_field *f = &e->rule.fields[i];
e                1966 kernel/auditsc.c 			    && e->rule.action == AUDIT_NEVER) {
e                2069 kernel/auditsc.c 	struct audit_entry *e;
e                2077 kernel/auditsc.c 	list_for_each_entry_rcu(e, list, list) {
e                2078 kernel/auditsc.c 		for (i = 0; i < e->rule.field_count; i++) {
e                2079 kernel/auditsc.c 			struct audit_field *f = &e->rule.fields[i];
e                2084 kernel/auditsc.c 			    && e->rule.action == AUDIT_NEVER) {
e                6306 kernel/bpf/verifier.c static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
e                6312 kernel/bpf/verifier.c 	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
e                6315 kernel/bpf/verifier.c 	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
e                6324 kernel/bpf/verifier.c 	if (e == BRANCH)
e                6330 kernel/bpf/verifier.c 		insn_state[t] = DISCOVERED | e;
e                6345 kernel/bpf/verifier.c 		insn_state[t] = DISCOVERED | e;
e                 236 kernel/debug/kdb/kdb_main.c 		char *e = *ep++;
e                 238 kernel/debug/kdb/kdb_main.c 		if (!e)
e                 241 kernel/debug/kdb/kdb_main.c 		if ((strncmp(match, e, matchlen) == 0)
e                 242 kernel/debug/kdb/kdb_main.c 		 && ((e[matchlen] == '\0')
e                 243 kernel/debug/kdb/kdb_main.c 		   || (e[matchlen] == '='))) {
e                 244 kernel/debug/kdb/kdb_main.c 			char *cp = strchr(e, '=');
e                  54 kernel/extable.c 	const struct exception_table_entry *e;
e                  56 kernel/extable.c 	e = search_kernel_exception_table(addr);
e                  57 kernel/extable.c 	if (!e)
e                  58 kernel/extable.c 		e = search_module_extables(addr);
e                  59 kernel/extable.c 	return e;
e                 890 kernel/locking/lockdep.c static bool in_list(struct list_head *e, struct list_head *h)
e                 895 kernel/locking/lockdep.c 		if (e == f)
e                 906 kernel/locking/lockdep.c static bool in_any_class_list(struct list_head *e)
e                 913 kernel/locking/lockdep.c 		if (in_list(e, &class->locks_after) ||
e                 914 kernel/locking/lockdep.c 		    in_list(e, &class->locks_before))
e                 922 kernel/locking/lockdep.c 	struct lock_list *e;
e                 924 kernel/locking/lockdep.c 	list_for_each_entry(e, h, entry) {
e                 925 kernel/locking/lockdep.c 		if (e->links_to != c) {
e                 928 kernel/locking/lockdep.c 			       (unsigned long)(e - list_entries),
e                 929 kernel/locking/lockdep.c 			       e->links_to && e->links_to->name ?
e                 930 kernel/locking/lockdep.c 			       e->links_to->name : "(?)",
e                 931 kernel/locking/lockdep.c 			       e->class && e->class->name ? e->class->name :
e                 984 kernel/locking/lockdep.c 	struct lock_list *e;
e                1022 kernel/locking/lockdep.c 		e = list_entries + i;
e                1023 kernel/locking/lockdep.c 		if (!in_any_class_list(&e->entry)) {
e                1025 kernel/locking/lockdep.c 			       (unsigned int)(e - list_entries),
e                1026 kernel/locking/lockdep.c 			       e->class->name ? : "(?)",
e                1027 kernel/locking/lockdep.c 			       e->links_to->name ? : "(?)");
e                1037 kernel/locking/lockdep.c 		e = list_entries + i;
e                1038 kernel/locking/lockdep.c 		if (in_any_class_list(&e->entry)) {
e                1040 kernel/locking/lockdep.c 			       (unsigned int)(e - list_entries),
e                1041 kernel/locking/lockdep.c 			       e->class && e->class->name ? e->class->name :
e                1043 kernel/locking/lockdep.c 			       e->links_to && e->links_to->name ?
e                1044 kernel/locking/lockdep.c 			       e->links_to->name : "(?)");
e                4418 kernel/module.c 	const struct exception_table_entry *e = NULL;
e                4429 kernel/module.c 	e = search_extable(mod->extable,
e                4439 kernel/module.c 	return e;
e                 720 kernel/printk/printk.c static void append_char(char **pp, char *e, char c)
e                 722 kernel/printk/printk.c 	if (*pp < e)
e                 751 kernel/printk/printk.c 	char *p = buf, *e = buf + size;
e                 759 kernel/printk/printk.c 			p += scnprintf(p, e - p, "\\x%02x", c);
e                 761 kernel/printk/printk.c 			append_char(&p, e, c);
e                 763 kernel/printk/printk.c 	append_char(&p, e, '\n');
e                 772 kernel/printk/printk.c 				append_char(&p, e, ' ');
e                 777 kernel/printk/printk.c 				append_char(&p, e, '\n');
e                 783 kernel/printk/printk.c 				p += scnprintf(p, e - p, "\\x%02x", c);
e                 787 kernel/printk/printk.c 			append_char(&p, e, c);
e                 789 kernel/printk/printk.c 		append_char(&p, e, '\n');
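The printk.c lines assemble the extended log record with a bounded cursor: p advances toward e = buf + size, append_char() drops writes at the boundary, and scnprintf() returns only what actually fit, so p can never pass e. A userspace rendition; the scnprintf() here is a local stand-in with the kernel's semantics:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number
 * of characters actually written, never more than size - 1. */
static size_t scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int r;

        if (!size)
                return 0;
        va_start(ap, fmt);
        r = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (r < 0)
                return 0;
        return (size_t)r < size ? (size_t)r : size - 1;
}

static void append_char(char **pp, char *e, char c)
{
        if (*pp < e)
                *(*pp)++ = c;
}

/* Escape control characters, printk-style: p walks toward e and the
 * two helpers guarantee it never passes it. */
static size_t escape_line(char *buf, size_t size, const char *msg)
{
        char *p = buf, *e = buf + size;

        for (; *msg; msg++) {
                unsigned char c = (unsigned char)*msg;

                if (c < ' ' && c != '\t')
                        p += scnprintf(p, e - p, "\\x%02x", c);
                else
                        append_char(&p, e, c);
        }
        append_char(&p, e, '\n');
        return p - buf;
}

int main(void)
{
        char buf[32];
        size_t n = escape_line(buf, sizeof(buf) - 1, "hi\x01");

        buf[n] = '\0';
        printf("%s", buf);      /* prints hi\x01 and a newline */
        return 0;
}

However small the buffer, output is silently truncated rather than overrun, which is the whole point of the p/e pairing.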
e                 315 kernel/sched/debug.c 		struct ctl_table *e = cpu_entries;
e                 323 kernel/sched/debug.c 			cpu_idx[i] = e;
e                 324 kernel/sched/debug.c 			e++;
e                 340 kernel/sched/debug.c 		struct ctl_table *e = cpu_idx[i];
e                 342 kernel/sched/debug.c 		if (e->child)
e                 343 kernel/sched/debug.c 			sd_free_ctl_entry(&e->child);
e                 345 kernel/sched/debug.c 		if (!e->procname) {
e                 347 kernel/sched/debug.c 			e->procname = kstrdup(buf, GFP_KERNEL);
e                 349 kernel/sched/debug.c 		e->mode = 0555;
e                 350 kernel/sched/debug.c 		e->child = sd_alloc_ctl_cpu_table(i);
e                3545 kernel/trace/trace.c 	unsigned long t, e;
e                3552 kernel/trace/trace.c 		get_total_entries_cpu(buf, &t, &e, cpu);
e                3554 kernel/trace/trace.c 		*entries += e;
e                2108 kernel/trace/trace_events_filter.c 		    .e = ve, .f = vf, .g = vg, .h = vh }, \
e                  12 kernel/trace/trace_events_filter_test.h 	TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
e                  14 kernel/trace/trace_events_filter_test.h 	TP_ARGS(a, b, c, d, e, f, g, h),
e                  21 kernel/trace/trace_events_filter_test.h 		__field(int, e)
e                  32 kernel/trace/trace_events_filter_test.h 		__entry->e = e;
e                  40 kernel/trace/trace_events_filter_test.h 		  __entry->e, __entry->f, __entry->g, __entry->h)
e                 665 kernel/trace/trace_output.c 	struct trace_event *e;
e                 677 kernel/trace/trace_output.c 	list_for_each_entry(e, &ftrace_event_list, list) {
e                 678 kernel/trace/trace_output.c 		if (e->type != last + 1)
e                 687 kernel/trace/trace_output.c 	*list = &e->list;
e                 223 kernel/user_namespace.c static int cmp_map_id(const void *k, const void *e)
e                 227 kernel/user_namespace.c 	const struct uid_gid_extent *el = e;
e                 976 kernel/user_namespace.c 		struct uid_gid_extent *e;
e                 980 kernel/user_namespace.c 			e = &new_map.extent[idx];
e                 982 kernel/user_namespace.c 			e = &new_map.forward[idx];
e                 985 kernel/user_namespace.c 						e->lower_first,
e                 986 kernel/user_namespace.c 						e->count);
e                 994 kernel/user_namespace.c 		e->lower_first = lower_first;
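cmp_map_id() above is a bsearch() comparator over sorted, disjoint ID ranges: it reports the key as below, above, or inside an extent, so the binary search lands on the extent containing the id. A self-contained sketch with a two-extent map in the spirit of struct uid_gid_extent:

#include <stdio.h>
#include <stdlib.h>

struct extent {
        unsigned first;
        unsigned count;
        unsigned lower_first;
};

/* bsearch() comparator: 0 means the id falls inside this extent. */
static int cmp_map_id(const void *k, const void *e)
{
        unsigned id = *(const unsigned *)k;
        const struct extent *el = e;

        if (id < el->first)
                return -1;
        if (id >= el->first + el->count)
                return 1;
        return 0;
}

int main(void)
{
        static const struct extent map[] = {
                { 0,    1000, 100000 },
                { 1000, 1000, 200000 },
        };
        unsigned id = 1234;
        const struct extent *el = bsearch(&id, map, 2, sizeof(*map),
                                          cmp_map_id);

        if (el)
                printf("%u -> %u\n", id,
                       el->lower_first + (id - el->first));
        return 0;       /* prints: 1234 -> 200234 */
}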
e                 649 lib/bch.c      	unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4;
e                 669 lib/bch.c      			e = a_pow(bch, l/2);
e                 678 lib/bch.c      			b = gf_mul(bch, a, e)^b;
e                 699 lib/bch.c      			roots[i] = a_ilog(bch, f^e);
e                  48 lib/crypto/sha256.c 	u32 a, b, c, d, e, f, g, h, t1, t2;
e                  62 lib/crypto/sha256.c 	e = state[4];  f = state[5];  g = state[6];  h = state[7];
e                  65 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
e                  67 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
e                  69 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
e                  71 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
e                  72 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                  74 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                  76 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                  78 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                  80 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                  82 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
e                  84 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
e                  86 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
e                  88 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
e                  89 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                  91 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                  93 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                  95 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                  97 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                  99 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
e                 101 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
e                 103 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
e                 105 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
e                 106 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 108 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 110 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 112 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 114 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 116 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
e                 118 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
e                 120 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
e                 122 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
e                 123 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 125 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 127 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 129 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 131 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 133 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
e                 135 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
e                 137 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
e                 139 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
e                 140 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 142 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 144 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 146 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 148 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 150 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
e                 152 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
e                 154 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
e                 156 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
e                 157 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 159 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 161 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 163 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 165 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 167 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
e                 169 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
e                 171 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
e                 173 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
e                 174 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 176 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 178 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 180 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 182 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 184 lib/crypto/sha256.c 	t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
e                 186 lib/crypto/sha256.c 	t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
e                 188 lib/crypto/sha256.c 	t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
e                 190 lib/crypto/sha256.c 	t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
e                 191 lib/crypto/sha256.c 	t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
e                 193 lib/crypto/sha256.c 	t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
e                 195 lib/crypto/sha256.c 	t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
e                 197 lib/crypto/sha256.c 	t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
e                 199 lib/crypto/sha256.c 	t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;
e                 202 lib/crypto/sha256.c 	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
e                 205 lib/crypto/sha256.c 	a = b = c = d = e = f = g = h = t1 = t2 = 0;
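The long lib/crypto/sha256.c run is the fully unrolled compression function: each round computes t1/t2 and, instead of shuffling the eight working variables, the next round simply renames them (note e1(e) becoming e1(d) one line later), with the feedforward into state[] at the end. The same schedule written as the textbook loop; the 64-entry round-constant table is assumed supplied by the caller (its first values, 0x428a2f98 and so on, are visible in the excerpt):

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned n)
{
        return (x >> n) | (x << (32 - n));
}

/* The helpers the unrolled kernel rounds use. */
#define Ch(x, y, z)     ((z) ^ ((x) & ((y) ^ (z))))
#define Maj(x, y, z)    (((x) & (y)) | ((z) & ((x) | (y))))
#define e0(x)           (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x)           (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))

extern const uint32_t K[64];    /* standard SHA-256 round constants */

/* Loop form of the 64 unrolled rounds above; W[] is the already
 * expanded message schedule.  The kernel unrolls this and renames
 * a..h each round instead of moving them. */
static void sha256_rounds(uint32_t state[8], const uint32_t W[64])
{
        uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
        uint32_t e = state[4], f = state[5], g = state[6], h = state[7];
        uint32_t t1, t2;
        int i;

        for (i = 0; i < 64; i++) {
                t1 = h + e1(e) + Ch(e, f, g) + K[i] + W[i];
                t2 = e0(a) + Maj(a, b, c);
                h = g; g = f; f = e; e = d + t1;
                d = c; c = b; b = a; a = t1 + t2;
        }

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}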
e                 195 lib/decompress_unxz.c 	uint8_t *e = b + size;
e                 197 lib/decompress_unxz.c 	while (b != e)
e                 140 lib/inflate.c    uch e;                /* number of extra bits or operation */
e                 327 lib/inflate.c  	const ush *e,           /* list of extra bits for non-simple codes */
e                 508 lib/inflate.c            r.e = (uch)(16 + j);  /* bits in this table */
e                 520 lib/inflate.c          r.e = 99;               /* out of values--invalid code */
e                 523 lib/inflate.c          r.e = (uch)(*p < 256 ? 16 : 15);    /* 256 is end-of-block code */
e                 529 lib/inflate.c          r.e = (uch)e[*p - s];   /* non-simple--look up in lists */
e                 598 lib/inflate.c    register unsigned e;  /* table entry flag/number of extra bits */
e                 618 lib/inflate.c      if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
e                 620 lib/inflate.c          if (e == 99)
e                 623 lib/inflate.c          e -= 16;
e                 624 lib/inflate.c          NEEDBITS(e)
e                 625 lib/inflate.c        } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
e                 627 lib/inflate.c      if (e == 16)                /* then it's a literal */
e                 640 lib/inflate.c        if (e == 15)
e                 644 lib/inflate.c        NEEDBITS(e)
e                 645 lib/inflate.c        n = t->v.n + ((unsigned)b & mask_bits[e]);
e                 646 lib/inflate.c        DUMPBITS(e);
e                 650 lib/inflate.c        if ((e = (t = td + ((unsigned)b & md))->e) > 16)
e                 652 lib/inflate.c            if (e == 99)
e                 655 lib/inflate.c            e -= 16;
e                 656 lib/inflate.c            NEEDBITS(e)
e                 657 lib/inflate.c          } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
e                 659 lib/inflate.c        NEEDBITS(e)
e                 660 lib/inflate.c        d = w - t->v.n - ((unsigned)b & mask_bits[e]);
e                 661 lib/inflate.c        DUMPBITS(e)
e                 666 lib/inflate.c          n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
e                 668 lib/inflate.c          if (w - d >= e)         /* (this test assumes unsigned comparison) */
e                 670 lib/inflate.c            memcpy(slide + w, slide + d, e);
e                 671 lib/inflate.c            w += e;
e                 672 lib/inflate.c            d += e;
e                 679 lib/inflate.c            } while (--e);
e                1035 lib/inflate.c  	int *e                  /* last block flag */
e                1052 lib/inflate.c    *e = (int)b & 1;
e                1088 lib/inflate.c    int e;                /* last block flag */
e                1105 lib/inflate.c      r = inflate_block(&e);
e                1110 lib/inflate.c    } while (!e);
e                1152 lib/inflate.c    unsigned long e;      /* polynomial exclusive-or pattern */
e                1160 lib/inflate.c    e = 0;
e                1162 lib/inflate.c      e |= 1L << (31 - p[i]);
e                1171 lib/inflate.c        c = c & 1 ? (c >> 1) ^ e : c >> 1;
e                1173 lib/inflate.c          c ^= e;
e                1182 lib/locking-selftest.c #define dotest_rt(fn, e, m)	dotest((fn), (e), (m))
e                1184 lib/locking-selftest.c #define dotest_rt(fn, e, m)
e                  39 lib/lru_cache.c #define PARANOIA_LC_ELEMENT(lc, e) do {	\
e                  41 lib/lru_cache.c 	struct lc_element *e_ = (e);	\
e                  94 lib/lru_cache.c 	struct lc_element *e;
e                 138 lib/lru_cache.c 		e = p + e_off;
e                 139 lib/lru_cache.c 		e->lc_index = i;
e                 140 lib/lru_cache.c 		e->lc_number = LC_FREE;
e                 141 lib/lru_cache.c 		e->lc_new_number = LC_FREE;
e                 142 lib/lru_cache.c 		list_add(&e->list, &lc->free);
e                 143 lib/lru_cache.c 		element[i] = e;
e                 212 lib/lru_cache.c 		struct lc_element *e = lc->lc_element[i];
e                 213 lib/lru_cache.c 		void *p = e;
e                 217 lib/lru_cache.c 		e->lc_index = i;
e                 218 lib/lru_cache.c 		e->lc_number = LC_FREE;
e                 219 lib/lru_cache.c 		e->lc_new_number = LC_FREE;
e                 220 lib/lru_cache.c 		list_add(&e->list, &lc->free);
e                 252 lib/lru_cache.c 	struct lc_element *e;
e                 256 lib/lru_cache.c 	hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
e                 260 lib/lru_cache.c 		if (e->lc_new_number != enr)
e                 262 lib/lru_cache.c 		if (e->lc_new_number == e->lc_number || include_changing)
e                 263 lib/lru_cache.c 			return e;
e                 297 lib/lru_cache.c 	struct lc_element *e = __lc_find(lc, enr, 1);
e                 298 lib/lru_cache.c 	return e && e->refcnt;
e                 309 lib/lru_cache.c void lc_del(struct lru_cache *lc, struct lc_element *e)
e                 312 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
e                 313 lib/lru_cache.c 	BUG_ON(e->refcnt);
e                 315 lib/lru_cache.c 	e->lc_number = e->lc_new_number = LC_FREE;
e                 316 lib/lru_cache.c 	hlist_del_init(&e->colision);
e                 317 lib/lru_cache.c 	list_move(&e->list, &lc->free);
e                 324 lib/lru_cache.c 	struct lc_element *e;
e                 333 lib/lru_cache.c 	e = list_entry(n, struct lc_element, list);
e                 334 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
e                 336 lib/lru_cache.c 	e->lc_new_number = new_number;
e                 337 lib/lru_cache.c 	if (!hlist_unhashed(&e->colision))
e                 338 lib/lru_cache.c 		__hlist_del(&e->colision);
e                 339 lib/lru_cache.c 	hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
e                 340 lib/lru_cache.c 	list_move(&e->list, &lc->to_be_changed);
e                 342 lib/lru_cache.c 	return e;
e                 363 lib/lru_cache.c 	struct lc_element *e;
e                 371 lib/lru_cache.c 	e = __lc_find(lc, enr, 1);
e                 376 lib/lru_cache.c 	if (e) {
e                 377 lib/lru_cache.c 		if (e->lc_new_number != e->lc_number) {
e                 386 lib/lru_cache.c 			++e->refcnt;
e                 388 lib/lru_cache.c 			RETURN(e);
e                 392 lib/lru_cache.c 		if (e->refcnt++ == 0)
e                 394 lib/lru_cache.c 		list_move(&e->list, &lc->in_use); /* Not evictable... */
e                 395 lib/lru_cache.c 		RETURN(e);
e                 429 lib/lru_cache.c 	e = lc_prepare_for_change(lc, enr);
e                 430 lib/lru_cache.c 	BUG_ON(!e);
e                 433 lib/lru_cache.c 	BUG_ON(++e->refcnt != 1);
e                 437 lib/lru_cache.c 	RETURN(e);
e                 536 lib/lru_cache.c 	struct lc_element *e, *tmp;
e                 539 lib/lru_cache.c 	list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
e                 542 lib/lru_cache.c 		e->lc_number = e->lc_new_number;
e                 543 lib/lru_cache.c 		list_move(&e->list, &lc->in_use);
e                 559 lib/lru_cache.c unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
e                 562 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
e                 563 lib/lru_cache.c 	BUG_ON(e->refcnt == 0);
e                 564 lib/lru_cache.c 	BUG_ON(e->lc_number != e->lc_new_number);
e                 565 lib/lru_cache.c 	if (--e->refcnt == 0) {
e                 567 lib/lru_cache.c 		list_move(&e->list, &lc->lru);
e                 571 lib/lru_cache.c 	RETURN(e->refcnt);
e                 592 lib/lru_cache.c unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
e                 594 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
e                 595 lib/lru_cache.c 	return e->lc_index;
e                 608 lib/lru_cache.c 	struct lc_element *e;
e                 614 lib/lru_cache.c 	e = lc_element_by_index(lc, index);
e                 615 lib/lru_cache.c 	BUG_ON(e->lc_number != e->lc_new_number);
e                 616 lib/lru_cache.c 	BUG_ON(e->refcnt != 0);
e                 618 lib/lru_cache.c 	e->lc_number = e->lc_new_number = enr;
e                 619 lib/lru_cache.c 	hlist_del_init(&e->colision);
e                 623 lib/lru_cache.c 		hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
e                 626 lib/lru_cache.c 	list_move(&e->list, lh);
e                 642 lib/lru_cache.c 	struct lc_element *e;
e                 647 lib/lru_cache.c 		e = lc_element_by_index(lc, i);
e                 648 lib/lru_cache.c 		if (e->lc_number != e->lc_new_number)
e                 650 lib/lru_cache.c 				i, e->lc_number, e->lc_new_number, e->refcnt);
e                 653 lib/lru_cache.c 				i, e->lc_number, "-\"-", e->refcnt);
e                 655 lib/lru_cache.c 			detail(seq, e);
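
The lib/lru_cache.c excerpts above trace one element lifecycle: lookup walks a hash chain for the requested number, lc_get() pins the element (refcount up, moved to the in_use list), and lc_put() unpins it, returning it to the lru list once the last reference drops. A minimal sketch of that pin/unpin discipline, with toy types standing in for the kernel's struct lc_element and list heads:

    #include <stdio.h>

    /* Toy model of the lc_get()/lc_put() pattern visible above: a pinned
     * element sits on an "in_use" list; dropping the last reference moves
     * it to the LRU, where it becomes evictable.  Not the kernel API. */
    struct elem {
            unsigned int nr;      /* like e->lc_number */
            unsigned int refcnt;  /* like e->refcnt */
            const char *where;    /* "in_use" or "lru" */
    };

    static void get_elem(struct elem *e)
    {
            if (e->refcnt++ == 0)            /* first user pins it */
                    e->where = "in_use";     /* list_move(..., &lc->in_use) */
    }

    static unsigned int put_elem(struct elem *e)
    {
            if (--e->refcnt == 0)            /* last user unpins it */
                    e->where = "lru";        /* list_move(..., &lc->lru) */
            return e->refcnt;
    }

    int main(void)
    {
            struct elem e = { 7, 0, "lru" };

            get_elem(&e);
            get_elem(&e);
            put_elem(&e);
            printf("nr=%u refcnt=%u on %s\n", e.nr, e.refcnt, e.where);
            put_elem(&e);
            printf("nr=%u refcnt=%u on %s\n", e.nr, e.refcnt, e.where);
            return 0;
    }
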
e                 164 lib/lz4/lz4defs.h 	BYTE *const e = (BYTE *)dstEnd;
e                 170 lib/lz4/lz4defs.h 	} while (d < e);
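
The two lz4defs.h lines are the tail of a "wild copy": data moves in fixed-size strides until the write cursor passes dstEnd, deliberately overrunning by up to stride-1 bytes into slack the caller reserves. A sketch with 8-byte strides; memcpy stands in for the unaligned word store the real code uses:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy in 8-byte strides until d passes dstEnd; the caller must
     * reserve up to 7 bytes of slack after the destination. */
    static void wildcopy8(void *dst, const void *src, void *dstEnd)
    {
            uint8_t *d = dst;
            const uint8_t *s = src;
            uint8_t *const e = (uint8_t *)dstEnd;

            do {
                    memcpy(d, s, 8);   /* one stride */
                    d += 8;
                    s += 8;
            } while (d < e);
    }

    int main(void)
    {
            char src[24] = "exactly-twenty-bytes!!!";
            char dst[20 + 8] = { 0 };   /* +8 bytes of overrun slack */

            wildcopy8(dst, src, dst + 20);
            printf("%.20s\n", dst);
            return 0;
    }
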
e                 152 lib/mpi/mpi-pow.c 		mpi_limb_t e;
e                 162 lib/mpi/mpi-pow.c 		e = ep[i];
e                 163 lib/mpi/mpi-pow.c 		c = count_leading_zeros(e);
e                 164 lib/mpi/mpi-pow.c 		e = (e << c) << 1;	/* shift the exp bits to the left, lose msb */
e                 215 lib/mpi/mpi-pow.c 				if ((mpi_limb_signed_t) e < 0) {
e                 243 lib/mpi/mpi-pow.c 				e <<= 1;
e                 251 lib/mpi/mpi-pow.c 			e = ep[i];
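
The mpi-pow.c excerpts are left-to-right square-and-multiply: count_leading_zeros() normalizes the exponent limb so its highest set bit reaches the word's MSB, each iteration squares the accumulator, and the signed compare ((mpi_limb_signed_t)e < 0) tests the current top bit before e shifts left. A single-word sketch of the same loop shape (GCC/Clang builtins; the real code is multi-precision and additionally discards the MSB because it seeds the result with the base):

    #include <stdint.h>
    #include <stdio.h>

    /* Single-word modular exponentiation, mirroring the bit-scanning
     * loop above.  Toy: uses __int128 instead of limb arithmetic. */
    static uint64_t powmod(uint64_t base, uint64_t exp, uint64_t mod)
    {
            uint64_t res = 1 % mod;
            int c, bits;

            if (exp == 0)
                    return res;

            c = __builtin_clzll(exp);   /* like count_leading_zeros(e) */
            exp <<= c;                  /* top set bit now at bit 63 */

            for (bits = 64 - c; bits > 0; bits--) {
                    res = (unsigned __int128)res * res % mod;           /* square */
                    if ((int64_t)exp < 0)                               /* top bit set? */
                            res = (unsigned __int128)res * base % mod;  /* multiply */
                    exp <<= 1;
            }
            return res;
    }

    int main(void)
    {
            /* 561 = 3*11*17 is a Carmichael number, so 7^560 mod 561 == 1 */
            printf("%llu\n", (unsigned long long)powmod(7, 560, 561));
            return 0;
    }
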
e                 442 lib/reed_solomon/test_rslib.c static int run_exercise(struct etab *e)
e                 444 lib/reed_solomon/test_rslib.c 	int nn = (1 << e->symsize) - 1;
e                 445 lib/reed_solomon/test_rslib.c 	int kk = nn - e->nroots;
e                 453 lib/reed_solomon/test_rslib.c 	rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
e                 475 lib/reed_solomon/test_rslib.c 		retval |= exercise_rs(rsc, ws, len, e->ntrials);
e                 477 lib/reed_solomon/test_rslib.c 			retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
e                 167 lib/test_hexdump.c 	int ae, he, e, f, r;
e                 184 lib/test_hexdump.c 		e = ae;
e                 186 lib/test_hexdump.c 		e = he;
e                 188 lib/test_hexdump.c 	f = min_t(int, e + 1, buflen);
e                 195 lib/test_hexdump.c 	a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE);
e                 203 lib/test_hexdump.c 		pr_err("Expect: %d '%s'\n", e, test);
e                  66 lib/zstd/fse_decompress.c 		size_t const e = f; \
e                  67 lib/zstd/fse_decompress.c 		if (FSE_isError(e)) \
e                  68 lib/zstd/fse_decompress.c 			return e;   \
e                  56 lib/zstd/huf_compress.c #define CHECK_V_F(e, f)     \
e                  57 lib/zstd/huf_compress.c 	size_t const e = f; \
e                  58 lib/zstd/huf_compress.c 	if (ERR_isError(e)) \
e                  47 lib/zstd/zstd_internal.h #define CHECK_E(f, e)                    \
e                  51 lib/zstd/zstd_internal.h 			return ERROR(e); \
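
The fse/huf/zstd excerpts are the same error-propagation idiom three ways: evaluate an expression once into a const local named by the macro's first argument, and return early if the value encodes an error. The macro deliberately avoids a do { } while (0) wrapper because the declared local must stay in scope for the code that follows. A reconstruction of the shape, with is_error() standing in for FSE_isError()/ERR_isError():

    #include <stddef.h>
    #include <stdio.h>

    static int is_error(size_t code) { return code == (size_t)-1; }

    /* Declares e in the caller's scope; later statements may use it. */
    #define CHECK_V_F(e, f)         \
            size_t const e = (f);   \
            if (is_error(e))        \
                    return e

    static size_t step(size_t in) { return in < 100 ? in + 1 : (size_t)-1; }

    static size_t pipeline(size_t in)
    {
            CHECK_V_F(a, step(in));   /* a stays visible below on success */
            CHECK_V_F(b, step(a));
            return b;
    }

    int main(void)
    {
            printf("%zu\n", pipeline(3));    /* 5 */
            printf("%zu\n", pipeline(200));  /* SIZE_MAX, i.e. the error code */
            return 0;
    }
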
e                1688 mm/vmalloc.c   				unsigned long s, e;
e                1691 mm/vmalloc.c   				e = va_start + (vb->dirty_max << PAGE_SHIFT);
e                1694 mm/vmalloc.c   				end   = max(e, end);
e                 789 net/atm/clip.c static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
e                 793 net/atm/clip.c 		curr = e->vccs;
e                 807 net/atm/clip.c 			       struct atmarp_entry *e, loff_t * pos)
e                 811 net/atm/clip.c 	vcc = clip_seq_next_vcc(e, vcc);
e                 814 net/atm/clip.c 			vcc = clip_seq_next_vcc(e, vcc);
e                 835 net/atm/lec.c  	struct hlist_node *e = state->node;
e                 837 net/atm/lec.c  	if (!e)
e                 838 net/atm/lec.c  		e = tbl->first;
e                 839 net/atm/lec.c  	if (e == SEQ_START_TOKEN) {
e                 840 net/atm/lec.c  		e = tbl->first;
e                 844 net/atm/lec.c  	for (; e; e = e->next) {
e                 848 net/atm/lec.c  	state->node = e;
e                1088 net/bluetooth/hci_core.c 	struct inquiry_entry *e;
e                1092 net/bluetooth/hci_core.c 	list_for_each_entry(e, &cache->all, all) {
e                1093 net/bluetooth/hci_core.c 		if (!bacmp(&e->data.bdaddr, bdaddr))
e                1094 net/bluetooth/hci_core.c 			return e;
e                1104 net/bluetooth/hci_core.c 	struct inquiry_entry *e;
e                1108 net/bluetooth/hci_core.c 	list_for_each_entry(e, &cache->unknown, list) {
e                1109 net/bluetooth/hci_core.c 		if (!bacmp(&e->data.bdaddr, bdaddr))
e                1110 net/bluetooth/hci_core.c 			return e;
e                1121 net/bluetooth/hci_core.c 	struct inquiry_entry *e;
e                1125 net/bluetooth/hci_core.c 	list_for_each_entry(e, &cache->resolve, list) {
e                1126 net/bluetooth/hci_core.c 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
e                1127 net/bluetooth/hci_core.c 			return e;
e                1128 net/bluetooth/hci_core.c 		if (!bacmp(&e->data.bdaddr, bdaddr))
e                1129 net/bluetooth/hci_core.c 			return e;
e                1220 net/bluetooth/hci_core.c 	struct inquiry_entry *e;
e                1223 net/bluetooth/hci_core.c 	list_for_each_entry(e, &cache->all, all) {
e                1224 net/bluetooth/hci_core.c 		struct inquiry_data *data = &e->data;
e                 341 net/bluetooth/hci_debugfs.c 	struct inquiry_entry *e;
e                 345 net/bluetooth/hci_debugfs.c 	list_for_each_entry(e, &cache->all, all) {
e                 346 net/bluetooth/hci_debugfs.c 		struct inquiry_data *data = &e->data;
e                 353 net/bluetooth/hci_debugfs.c 			   data->rssi, data->ssp_mode, e->timestamp);
e                1921 net/bluetooth/hci_event.c 				   struct inquiry_entry *e)
e                1927 net/bluetooth/hci_event.c 	bacpy(&cp.bdaddr, &e->data.bdaddr);
e                1928 net/bluetooth/hci_event.c 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
e                1929 net/bluetooth/hci_event.c 	cp.pscan_mode = e->data.pscan_mode;
e                1930 net/bluetooth/hci_event.c 	cp.clock_offset = e->data.clock_offset;
e                1938 net/bluetooth/hci_event.c 	struct inquiry_entry *e;
e                1943 net/bluetooth/hci_event.c 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
e                1944 net/bluetooth/hci_event.c 	if (!e)
e                1947 net/bluetooth/hci_event.c 	if (hci_resolve_name(hdev, e) == 0) {
e                1948 net/bluetooth/hci_event.c 		e->name_state = NAME_PENDING;
e                1959 net/bluetooth/hci_event.c 	struct inquiry_entry *e;
e                1980 net/bluetooth/hci_event.c 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
e                1985 net/bluetooth/hci_event.c 	if (!e)
e                1988 net/bluetooth/hci_event.c 	list_del(&e->list);
e                1990 net/bluetooth/hci_event.c 		e->name_state = NAME_KNOWN;
e                1992 net/bluetooth/hci_event.c 				 e->data.rssi, name, name_len);
e                1994 net/bluetooth/hci_event.c 		e->name_state = NAME_NOT_KNOWN;
e                2381 net/bluetooth/hci_event.c 	struct inquiry_entry *e;
e                2415 net/bluetooth/hci_event.c 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
e                2416 net/bluetooth/hci_event.c 	if (e && hci_resolve_name(hdev, e) == 0) {
e                2417 net/bluetooth/hci_event.c 		e->name_state = NAME_PENDING;
e                2632 net/bluetooth/hci_request.c 	struct inquiry_entry *e;
e                2660 net/bluetooth/hci_request.c 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
e                2662 net/bluetooth/hci_request.c 		if (!e)
e                2665 net/bluetooth/hci_request.c 		bacpy(&cp.bdaddr, &e->data.bdaddr);
e                4086 net/bluetooth/mgmt.c 	struct inquiry_entry *e;
e                4100 net/bluetooth/mgmt.c 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
e                4101 net/bluetooth/mgmt.c 	if (!e) {
e                4109 net/bluetooth/mgmt.c 		e->name_state = NAME_KNOWN;
e                4110 net/bluetooth/mgmt.c 		list_del(&e->list);
e                4112 net/bluetooth/mgmt.c 		e->name_state = NAME_NEEDED;
e                4113 net/bluetooth/mgmt.c 		hci_inquiry_cache_update_resolve(hdev, e);
e                1465 net/bluetooth/smp.c 	       local_addr, remote_addr, check.e);
e                2810 net/bluetooth/smp.c 	u8 io_cap[3], r[16], e[16];
e                2841 net/bluetooth/smp.c 		     io_cap, remote_addr, local_addr, e);
e                2845 net/bluetooth/smp.c 	if (crypto_memneq(check->e, e, 16))
e                 117 net/bluetooth/smp.h 	__u8	e[16];
e                 202 net/bridge/br_input.c 	struct nf_hook_entries *e = NULL;
e                 214 net/bridge/br_input.c 	e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
e                 215 net/bridge/br_input.c 	if (!e)
e                 222 net/bridge/br_input.c 	for (i = 0; i < e->num_hook_entries; i++) {
e                 223 net/bridge/br_input.c 		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
e                  57 net/bridge/br_mdb.c static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
e                  59 net/bridge/br_mdb.c 	e->state = flags & MDB_PG_FLAGS_PERMANENT;
e                  60 net/bridge/br_mdb.c 	e->flags = 0;
e                  62 net/bridge/br_mdb.c 		e->flags |= MDB_FLAGS_OFFLOAD;
e                  64 net/bridge/br_mdb.c 		e->flags |= MDB_FLAGS_FAST_LEAVE;
e                  86 net/bridge/br_mdb.c 	struct br_mdb_entry e;
e                  90 net/bridge/br_mdb.c 	memset(&e, 0, sizeof(e));
e                 100 net/bridge/br_mdb.c 	__mdb_entry_fill_flags(&e, flags);
e                 101 net/bridge/br_mdb.c 	e.ifindex = ifindex;
e                 102 net/bridge/br_mdb.c 	e.vid = mp->addr.vid;
e                 104 net/bridge/br_mdb.c 		e.addr.u.ip4 = mp->addr.u.ip4;
e                 107 net/bridge/br_mdb.c 		e.addr.u.ip6 = mp->addr.u.ip6;
e                 109 net/bridge/br_mdb.c 	e.addr.proto = mp->addr.proto;
e                 115 net/bridge/br_mdb.c 	if (nla_put_nohdr(skb, sizeof(e), &e) ||
e                1002 net/bridge/br_netfilter_hooks.c 	const struct nf_hook_entries *e;
e                1008 net/bridge/br_netfilter_hooks.c 	e = rcu_dereference(net->nf.hooks_bridge[hook]);
e                1009 net/bridge/br_netfilter_hooks.c 	if (!e)
e                1012 net/bridge/br_netfilter_hooks.c 	ops = nf_hook_entries_get_hook_ops(e);
e                1013 net/bridge/br_netfilter_hooks.c 	for (i = 0; i < e->num_hook_entries &&
e                1020 net/bridge/br_netfilter_hooks.c 	ret = nf_hook_slow(skb, &state, e, i);
e                 104 net/bridge/netfilter/ebt_arp.c 	const struct ebt_entry *e = par->entryinfo;
e                 106 net/bridge/netfilter/ebt_arp.c 	if ((e->ethproto != htons(ETH_P_ARP) &&
e                 107 net/bridge/netfilter/ebt_arp.c 	   e->ethproto != htons(ETH_P_RARP)) ||
e                 108 net/bridge/netfilter/ebt_arp.c 	   e->invflags & EBT_IPROTO)
e                  65 net/bridge/netfilter/ebt_arpreply.c 	const struct ebt_entry *e = par->entryinfo;
e                  69 net/bridge/netfilter/ebt_arpreply.c 	if (e->ethproto != htons(ETH_P_ARP) ||
e                  70 net/bridge/netfilter/ebt_arpreply.c 	    e->invflags & EBT_IPROTO)
e                 107 net/bridge/netfilter/ebt_ip.c 	const struct ebt_entry *e = par->entryinfo;
e                 109 net/bridge/netfilter/ebt_ip.c 	if (e->ethproto != htons(ETH_P_IP) ||
e                 110 net/bridge/netfilter/ebt_ip.c 	   e->invflags & EBT_IPROTO)
e                 108 net/bridge/netfilter/ebt_ip6.c 	const struct ebt_entry *e = par->entryinfo;
e                 111 net/bridge/netfilter/ebt_ip6.c 	if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
e                 157 net/bridge/netfilter/ebt_stp.c 	const struct ebt_entry *e = par->entryinfo;
e                 164 net/bridge/netfilter/ebt_stp.c 	    (!ether_addr_equal(e->destmac, eth_stp_addr) ||
e                 165 net/bridge/netfilter/ebt_stp.c 	     !(e->bitmask & EBT_DESTMAC) ||
e                 166 net/bridge/netfilter/ebt_stp.c 	     !is_broadcast_ether_addr(e->destmsk)))
e                  80 net/bridge/netfilter/ebt_vlan.c 	const struct ebt_entry *e = par->entryinfo;
e                  83 net/bridge/netfilter/ebt_vlan.c 	if (e->ethproto != htons(ETH_P_8021Q)) {
e                  85 net/bridge/netfilter/ebt_vlan.c 			 ntohs(e->ethproto));
e                 118 net/bridge/netfilter/ebtables.c ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
e                 130 net/bridge/netfilter/ebtables.c 	if (e->bitmask & EBT_802_3) {
e                 131 net/bridge/netfilter/ebtables.c 		if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
e                 133 net/bridge/netfilter/ebtables.c 	} else if (!(e->bitmask & EBT_NOPROTO) &&
e                 134 net/bridge/netfilter/ebtables.c 		   NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
e                 137 net/bridge/netfilter/ebtables.c 	if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
e                 139 net/bridge/netfilter/ebtables.c 	if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
e                 143 net/bridge/netfilter/ebtables.c 	    NF_INVF(e, EBT_ILOGICALIN,
e                 144 net/bridge/netfilter/ebtables.c 		    ebt_dev_check(e->logical_in, p->br->dev)))
e                 147 net/bridge/netfilter/ebtables.c 	    NF_INVF(e, EBT_ILOGICALOUT,
e                 148 net/bridge/netfilter/ebtables.c 		    ebt_dev_check(e->logical_out, p->br->dev)))
e                 151 net/bridge/netfilter/ebtables.c 	if (e->bitmask & EBT_SOURCEMAC) {
e                 152 net/bridge/netfilter/ebtables.c 		if (NF_INVF(e, EBT_ISOURCE,
e                 153 net/bridge/netfilter/ebtables.c 			    !ether_addr_equal_masked(h->h_source, e->sourcemac,
e                 154 net/bridge/netfilter/ebtables.c 						     e->sourcemsk)))
e                 157 net/bridge/netfilter/ebtables.c 	if (e->bitmask & EBT_DESTMAC) {
e                 158 net/bridge/netfilter/ebtables.c 		if (NF_INVF(e, EBT_IDEST,
e                 159 net/bridge/netfilter/ebtables.c 			    !ether_addr_equal_masked(h->h_dest, e->destmac,
e                 160 net/bridge/netfilter/ebtables.c 						     e->destmsk)))
e                 173 net/bridge/netfilter/ebtables.c ebt_get_target_c(const struct ebt_entry *e)
e                 175 net/bridge/netfilter/ebtables.c 	return ebt_get_target((struct ebt_entry *)e);
e                 260 net/bridge/netfilter/ebtables.c 			point = cs[sp].e;
e                 276 net/bridge/netfilter/ebtables.c 		cs[sp].e = ebt_next_entry(point);
e                 314 net/bridge/netfilter/ebtables.c 	} *e;
e                 317 net/bridge/netfilter/ebtables.c 	list_for_each_entry(e, head, list) {
e                 318 net/bridge/netfilter/ebtables.c 		if (strcmp(e->name, name) == 0)
e                 319 net/bridge/netfilter/ebtables.c 			return e;
e                 357 net/bridge/netfilter/ebtables.c 	const struct ebt_entry *e = par->entryinfo;
e                 359 net/bridge/netfilter/ebtables.c 	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
e                 380 net/bridge/netfilter/ebtables.c 	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
e                 394 net/bridge/netfilter/ebtables.c 	const struct ebt_entry *e = par->entryinfo;
e                 396 net/bridge/netfilter/ebtables.c 	size_t left = ((char *)e + e->target_offset) - (char *)w;
e                 417 net/bridge/netfilter/ebtables.c 	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
e                 443 net/bridge/netfilter/ebtables.c 		struct ebt_entry *e = (void *)newinfo->entries + offset;
e                 456 net/bridge/netfilter/ebtables.c 		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
e                 457 net/bridge/netfilter/ebtables.c 			if (e->bitmask != 0) {
e                 464 net/bridge/netfilter/ebtables.c 				newinfo->hook_entry[i] = (struct ebt_entries *)e;
e                 471 net/bridge/netfilter/ebtables.c 			if (left < e->next_offset)
e                 473 net/bridge/netfilter/ebtables.c 			if (e->next_offset < sizeof(struct ebt_entry))
e                 475 net/bridge/netfilter/ebtables.c 			offset += e->next_offset;
e                 494 net/bridge/netfilter/ebtables.c ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
e                 502 net/bridge/netfilter/ebtables.c 		if ((void *)e == (void *)newinfo->hook_entry[i])
e                 508 net/bridge/netfilter/ebtables.c 	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
e                 515 net/bridge/netfilter/ebtables.c 		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
e                 516 net/bridge/netfilter/ebtables.c 		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
e                 519 net/bridge/netfilter/ebtables.c 			   ((struct ebt_entries *)e)->policy != EBT_RETURN)
e                 524 net/bridge/netfilter/ebtables.c 		if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
e                 526 net/bridge/netfilter/ebtables.c 		*n = ((struct ebt_entries *)e)->nentries;
e                 531 net/bridge/netfilter/ebtables.c 	if (sizeof(struct ebt_entry) > e->watchers_offset ||
e                 532 net/bridge/netfilter/ebtables.c 	   e->watchers_offset > e->target_offset ||
e                 533 net/bridge/netfilter/ebtables.c 	   e->target_offset >= e->next_offset)
e                 537 net/bridge/netfilter/ebtables.c 	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
e                 555 net/bridge/netfilter/ebtables.c ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
e                 561 net/bridge/netfilter/ebtables.c 	if (e->bitmask)
e                 564 net/bridge/netfilter/ebtables.c 		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
e                 571 net/bridge/netfilter/ebtables.c 	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
e                 617 net/bridge/netfilter/ebtables.c ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
e                 622 net/bridge/netfilter/ebtables.c 	if (e->bitmask == 0)
e                 627 net/bridge/netfilter/ebtables.c 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
e                 628 net/bridge/netfilter/ebtables.c 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
e                 629 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
e                 642 net/bridge/netfilter/ebtables.c ebt_check_entry(struct ebt_entry *e, struct net *net,
e                 656 net/bridge/netfilter/ebtables.c 	if (e->bitmask == 0)
e                 659 net/bridge/netfilter/ebtables.c 	if (e->bitmask & ~EBT_F_MASK)
e                 662 net/bridge/netfilter/ebtables.c 	if (e->invflags & ~EBT_INV_MASK)
e                 665 net/bridge/netfilter/ebtables.c 	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
e                 672 net/bridge/netfilter/ebtables.c 		if ((char *)newinfo->hook_entry[i] < (char *)e)
e                 684 net/bridge/netfilter/ebtables.c 			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
e                 697 net/bridge/netfilter/ebtables.c 	mtpar.entryinfo = tgpar.entryinfo = e;
e                 700 net/bridge/netfilter/ebtables.c 	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
e                 704 net/bridge/netfilter/ebtables.c 	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
e                 707 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
e                 708 net/bridge/netfilter/ebtables.c 	gap = e->next_offset - e->target_offset;
e                 743 net/bridge/netfilter/ebtables.c 	      ntohs(e->ethproto), e->invflags & EBT_IPROTO);
e                 751 net/bridge/netfilter/ebtables.c 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
e                 753 net/bridge/netfilter/ebtables.c 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
e                 765 net/bridge/netfilter/ebtables.c 	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
e                 772 net/bridge/netfilter/ebtables.c 			e = cl_s[chain_nr].cs.e;
e                 785 net/bridge/netfilter/ebtables.c 		t = ebt_get_target_c(e);
e                 788 net/bridge/netfilter/ebtables.c 		if (e->target_offset + sizeof(struct ebt_standard_target) >
e                 789 net/bridge/netfilter/ebtables.c 		   e->next_offset)
e                 811 net/bridge/netfilter/ebtables.c 			cl_s[i].cs.e = ebt_next_entry(e);
e                 812 net/bridge/netfilter/ebtables.c 			e = (struct ebt_entry *)(hlp2->data);
e                 821 net/bridge/netfilter/ebtables.c 		e = ebt_next_entry(e);
e                1347 net/bridge/netfilter/ebtables.c static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
e                1354 net/bridge/netfilter/ebtables.c 	if (e->bitmask == 0) {
e                1356 net/bridge/netfilter/ebtables.c 		if (copy_to_user(ubase + ((char *)e - base), e,
e                1362 net/bridge/netfilter/ebtables.c 	if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
e                1365 net/bridge/netfilter/ebtables.c 	hlp = ubase + (((char *)e + e->target_offset) - base);
e                1366 net/bridge/netfilter/ebtables.c 	t = ebt_get_target_c(e);
e                1368 net/bridge/netfilter/ebtables.c 	ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
e                1371 net/bridge/netfilter/ebtables.c 	ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
e                1660 net/bridge/netfilter/ebtables.c static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
e                1669 net/bridge/netfilter/ebtables.c 	if (e->bitmask == 0) {
e                1672 net/bridge/netfilter/ebtables.c 		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
e                1684 net/bridge/netfilter/ebtables.c 	if (copy_to_user(ce, e, sizeof(*ce)))
e                1690 net/bridge/netfilter/ebtables.c 	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
e                1693 net/bridge/netfilter/ebtables.c 	watchers_offset = e->watchers_offset - (origsize - *size);
e                1695 net/bridge/netfilter/ebtables.c 	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
e                1698 net/bridge/netfilter/ebtables.c 	target_offset = e->target_offset - (origsize - *size);
e                1700 net/bridge/netfilter/ebtables.c 	t = ebt_get_target(e);
e                1705 net/bridge/netfilter/ebtables.c 	next_offset = e->next_offset - (origsize - *size);
e                1730 net/bridge/netfilter/ebtables.c static int compat_calc_entry(const struct ebt_entry *e,
e                1739 net/bridge/netfilter/ebtables.c 	if (e->bitmask == 0)
e                1743 net/bridge/netfilter/ebtables.c 	entry_offset = (void *)e - base;
e                1745 net/bridge/netfilter/ebtables.c 	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
e                1746 net/bridge/netfilter/ebtables.c 	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
e                1748 net/bridge/netfilter/ebtables.c 	t = ebt_get_target_c(e);
e                1762 net/bridge/netfilter/ebtables.c 		    (e < (struct ebt_entry *)(base - hookptr))) {
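
Nearly every test in the ebt_basic_match() excerpts pairs a predicate with a per-rule invert flag via NF_INVF, so one expression covers both "match X" and "match everything but X"; the macro boils down to XOR-ing the predicate with whether the flag is set. A toy rule struct with the same shape (MY_INVF mirrors the kernel macro's logic, but the types here are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define EBT_IPROTO 0x01
    /* predicate XOR "is the invert flag set" */
    #define MY_INVF(e, flag, boolean) ((boolean) ^ !!((e)->invflags & (flag)))

    struct rule {
            unsigned short ethproto;
            unsigned int invflags;
    };

    /* true when the rule should NOT match this frame's protocol */
    static bool proto_mismatch(const struct rule *e, unsigned short ethproto)
    {
            return MY_INVF(e, EBT_IPROTO, e->ethproto != ethproto);
    }

    int main(void)
    {
            struct rule plain = { 0x0800, 0 };          /* match IPv4 */
            struct rule inv   = { 0x0800, EBT_IPROTO }; /* match all but IPv4 */

            printf("%d\n", proto_mismatch(&plain, 0x0800));  /* 0: matches  */
            printf("%d\n", proto_mismatch(&inv,   0x0800));  /* 1: excluded */
            return 0;
    }
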
e                  73 net/caif/caif_dev.c static void caifd_put(struct caif_device_entry *e)
e                  75 net/caif/caif_dev.c 	this_cpu_dec(*e->pcpu_refcnt);
e                  78 net/caif/caif_dev.c static void caifd_hold(struct caif_device_entry *e)
e                  80 net/caif/caif_dev.c 	this_cpu_inc(*e->pcpu_refcnt);
e                  83 net/caif/caif_dev.c static int caifd_refcnt_read(struct caif_device_entry *e)
e                  87 net/caif/caif_dev.c 		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
e                  77 net/ceph/crush/hash.c 				      __u32 e)
e                  79 net/ceph/crush/hash.c 	__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
e                  84 net/ceph/crush/hash.c 	crush_hashmix(e, x, hash);
e                  89 net/ceph/crush/hash.c 	crush_hashmix(y, e, hash);
e                 134 net/ceph/crush/hash.c __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
e                 138 net/ceph/crush/hash.c 		return crush_hash32_rjenkins1_5(a, b, c, d, e);
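
The crush/hash.c excerpts outline a five-input Jenkins-style hash: the seed is XORed with every input up front, then the inputs are folded back into the accumulator pairwise, some against fixed salts. The sketch below keeps only that structure; mix() is a simple stand-in, not the real crush_hashmix(), and the fold order and salt values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in combiner, NOT crush_hashmix(). */
    static void mix(uint32_t a, uint32_t b, uint32_t *h)
    {
            *h ^= a + 0x9e3779b9u + (*h << 6) + (*h >> 2);
            *h ^= b + 0x9e3779b9u + (*h << 6) + (*h >> 2);
    }

    static uint32_t hash5(uint32_t seed, uint32_t a, uint32_t b,
                          uint32_t c, uint32_t d, uint32_t e)
    {
            uint32_t h = seed ^ a ^ b ^ c ^ d ^ e;   /* as in the excerpt */
            uint32_t x = 231232, y = 1232;           /* illustrative fixed salts */

            mix(a, b, &h);
            mix(c, d, &h);
            mix(e, x, &h);   /* cf. crush_hashmix(e, x, hash) */
            mix(b, x, &h);
            mix(y, e, &h);   /* cf. crush_hashmix(y, e, hash) */
            return h;
    }

    int main(void)
    {
            printf("%08x\n", hash5(0x12345678u, 1, 2, 3, 4, 5));
            return 0;
    }
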
e                  62 net/core/gen_estimator.c static void est_fetch_counters(struct net_rate_estimator *e,
e                  66 net/core/gen_estimator.c 	if (e->stats_lock)
e                  67 net/core/gen_estimator.c 		spin_lock(e->stats_lock);
e                  69 net/core/gen_estimator.c 	__gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);
e                  71 net/core/gen_estimator.c 	if (e->stats_lock)
e                  72 net/core/gen_estimator.c 		spin_unlock(e->stats_lock);
e                 493 net/decnet/dn_table.c 	unsigned int e = 0, s_e;
e                 508 net/decnet/dn_table.c 		e = 0;
e                 510 net/decnet/dn_table.c 			if (e < s_e)
e                 519 net/decnet/dn_table.c 			e++;
e                 523 net/decnet/dn_table.c 	cb->args[1] = e;
e                  16 net/dsa/port.c static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
e                  21 net/dsa/port.c 	err = raw_notifier_call_chain(nh, e, v);
e                 732 net/dsa/slave.c static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
e                 745 net/dsa/slave.c 	ret = ds->ops->set_mac_eee(ds, dp->index, e);
e                 749 net/dsa/slave.c 	return phylink_ethtool_set_eee(dp->pl, e);
e                 752 net/dsa/slave.c static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
e                 765 net/dsa/slave.c 	ret = ds->ops->get_mac_eee(ds, dp->index, e);
e                 769 net/dsa/slave.c 	return phylink_ethtool_get_eee(dp->pl, e);
e                 981 net/ipv4/fib_frontend.c 	unsigned int e = 0, s_e;
e                1022 net/ipv4/fib_frontend.c 		e = 0;
e                1025 net/ipv4/fib_frontend.c 			if (e < s_e)
e                1039 net/ipv4/fib_frontend.c 			e++;
e                1047 net/ipv4/fib_frontend.c 	cb->args[1] = e;
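
The dn_table.c and fib_frontend.c excerpts above (and the ipmr, ipmr_base, and ip6_fib lines further down) share the netlink dump-resume idiom: the callback counts emitted entries in e, skips everything below the watermark s_e saved in cb->args[], and writes e back when the receive buffer fills so the next pass resumes where this one stopped. A toy model with an explicit output budget:

    #include <stdio.h>

    struct cb { unsigned int args[2]; };   /* toy stand-in for netlink_callback */

    static int dump(struct cb *cb, unsigned int nitems, int budget)
    {
            unsigned int e, s_e = cb->args[1];
            int emitted = 0;

            for (e = 0; e < nitems; e++) {
                    if (e < s_e)
                            continue;        /* already sent in an earlier pass */
                    if (emitted == budget)
                            goto out;        /* buffer full: remember position */
                    printf("item %u\n", e);
                    emitted++;
            }
    out:
            cb->args[1] = e;
            return emitted;
    }

    int main(void)
    {
            struct cb cb = { { 0, 0 } };

            while (dump(&cb, 7, 3) > 0)      /* each pass resumes at cb->args[1] */
                    printf("-- next pass --\n");
            return 0;
    }
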
e                 947 net/ipv4/fou.c size_t fou_encap_hlen(struct ip_tunnel_encap *e)
e                 953 net/ipv4/fou.c size_t gue_encap_hlen(struct ip_tunnel_encap *e)
e                 960 net/ipv4/fou.c 	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
e                 971 net/ipv4/fou.c int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                 980 net/ipv4/fou.c 	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
e                 987 net/ipv4/fou.c int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                 996 net/ipv4/fou.c 	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
e                1010 net/ipv4/fou.c 	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
e                1062 net/ipv4/fou.c static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                1072 net/ipv4/fou.c 	uh->dest = e->dport;
e                1075 net/ipv4/fou.c 	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
e                1081 net/ipv4/fou.c static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                1084 net/ipv4/fou.c 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
e                1089 net/ipv4/fou.c 	err = __fou_build_header(skb, e, protocol, &sport, type);
e                1093 net/ipv4/fou.c 	fou_build_udp(skb, e, fl4, protocol, sport);
e                1098 net/ipv4/fou.c static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                1101 net/ipv4/fou.c 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
e                1106 net/ipv4/fou.c 	err = __gue_build_header(skb, e, protocol, &sport, type);
e                1110 net/ipv4/fou.c 	fou_build_udp(skb, e, fl4, protocol, sport);
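
The fou.c excerpts pick the encapsulating UDP source port with e->sport ? : udp_flow_src_port(...): use the configured port when set, otherwise derive one from the inner flow's hash so tunneled flows spread across ECMP and RSS. flow_hash_port() below is a hypothetical stand-in for udp_flow_src_port():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for udp_flow_src_port(): map a flow hash
     * into the ephemeral port range 49152..65535. */
    static uint16_t flow_hash_port(uint32_t flow_hash)
    {
            return (uint16_t)(49152 + (flow_hash % 16384));
    }

    static uint16_t pick_sport(uint16_t configured, uint32_t flow_hash)
    {
            return configured ? configured : flow_hash_port(flow_hash);
    }

    int main(void)
    {
            printf("%u\n", pick_sport(4789, 0xdeadbeef)); /* configured wins */
            printf("%u\n", pick_sport(0, 0xdeadbeef));    /* derived from hash */
            return 0;
    }
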
e                 657 net/ipv4/ipconfig.c 	u8 *e = options;
e                 662 net/ipv4/ipconfig.c 	memcpy(e, ic_bootp_cookie, 4);	/* RFC1048 Magic Cookie */
e                 663 net/ipv4/ipconfig.c 	e += 4;
e                 665 net/ipv4/ipconfig.c 	*e++ = 53;		/* DHCP message type */
e                 666 net/ipv4/ipconfig.c 	*e++ = 1;
e                 667 net/ipv4/ipconfig.c 	*e++ = mt;
e                 670 net/ipv4/ipconfig.c 		*e++ = 54;	/* Server ID (IP address) */
e                 671 net/ipv4/ipconfig.c 		*e++ = 4;
e                 672 net/ipv4/ipconfig.c 		memcpy(e, &ic_servaddr, 4);
e                 673 net/ipv4/ipconfig.c 		e += 4;
e                 675 net/ipv4/ipconfig.c 		*e++ = 50;	/* Requested IP address */
e                 676 net/ipv4/ipconfig.c 		*e++ = 4;
e                 677 net/ipv4/ipconfig.c 		memcpy(e, &ic_myaddr, 4);
e                 678 net/ipv4/ipconfig.c 		e += 4;
e                 695 net/ipv4/ipconfig.c 		*e++ = 55;	/* Parameter request list */
e                 696 net/ipv4/ipconfig.c 		*e++ = sizeof(ic_req_params);
e                 697 net/ipv4/ipconfig.c 		memcpy(e, ic_req_params, sizeof(ic_req_params));
e                 698 net/ipv4/ipconfig.c 		e += sizeof(ic_req_params);
e                 701 net/ipv4/ipconfig.c 			*e++ = 12;	/* host-name */
e                 703 net/ipv4/ipconfig.c 			*e++ = len;
e                 704 net/ipv4/ipconfig.c 			memcpy(e, utsname()->nodename, len);
e                 705 net/ipv4/ipconfig.c 			e += len;
e                 710 net/ipv4/ipconfig.c 			*e++ = 60;	/* Class-identifier */
e                 712 net/ipv4/ipconfig.c 			*e++ = len;
e                 713 net/ipv4/ipconfig.c 			memcpy(e, vendor_class_identifier, len);
e                 714 net/ipv4/ipconfig.c 			e += len;
e                 720 net/ipv4/ipconfig.c 		if (len >= 1 && len < 312 - (e - options) - 1) {
e                 721 net/ipv4/ipconfig.c 			*e++ = 61;
e                 722 net/ipv4/ipconfig.c 			*e++ = len + 1;
e                 723 net/ipv4/ipconfig.c 			memcpy(e, dhcp_client_identifier, len + 1);
e                 724 net/ipv4/ipconfig.c 			e += len + 1;
e                 728 net/ipv4/ipconfig.c 	*e++ = 255;	/* End of the list */
e                 733 net/ipv4/ipconfig.c static void __init ic_bootp_init_ext(u8 *e)
e                 735 net/ipv4/ipconfig.c 	memcpy(e, ic_bootp_cookie, 4);	/* RFC1048 Magic Cookie */
e                 736 net/ipv4/ipconfig.c 	e += 4;
e                 737 net/ipv4/ipconfig.c 	*e++ = 1;		/* Subnet mask request */
e                 738 net/ipv4/ipconfig.c 	*e++ = 4;
e                 739 net/ipv4/ipconfig.c 	e += 4;
e                 740 net/ipv4/ipconfig.c 	*e++ = 3;		/* Default gateway request */
e                 741 net/ipv4/ipconfig.c 	*e++ = 4;
e                 742 net/ipv4/ipconfig.c 	e += 4;
e                 744 net/ipv4/ipconfig.c 	*e++ = 6;		/* (DNS) name server request */
e                 745 net/ipv4/ipconfig.c 	*e++ = 4 * CONF_NAMESERVERS_MAX;
e                 746 net/ipv4/ipconfig.c 	e += 4 * CONF_NAMESERVERS_MAX;
e                 748 net/ipv4/ipconfig.c 	*e++ = 12;		/* Host name request */
e                 749 net/ipv4/ipconfig.c 	*e++ = 32;
e                 750 net/ipv4/ipconfig.c 	e += 32;
e                 751 net/ipv4/ipconfig.c 	*e++ = 40;		/* NIS Domain name request */
e                 752 net/ipv4/ipconfig.c 	*e++ = 32;
e                 753 net/ipv4/ipconfig.c 	e += 32;
e                 754 net/ipv4/ipconfig.c 	*e++ = 17;		/* Boot path */
e                 755 net/ipv4/ipconfig.c 	*e++ = 40;
e                 756 net/ipv4/ipconfig.c 	e += 40;
e                 758 net/ipv4/ipconfig.c 	*e++ = 57;		/* set extension buffer size for reply */
e                 759 net/ipv4/ipconfig.c 	*e++ = 2;
e                 760 net/ipv4/ipconfig.c 	*e++ = 1;		/* 128+236+8+20+14, see dhcpd sources */
e                 761 net/ipv4/ipconfig.c 	*e++ = 150;
e                 763 net/ipv4/ipconfig.c 	*e++ = 255;		/* End of the list */
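
The ipconfig.c excerpts append DHCP options as a tag/length/value stream through a moving cursor: *e++ = tag, *e++ = length, memcpy the value, advance. put_opt() below is a hypothetical helper wrapping those repeated steps; the RFC 1048 magic-cookie bytes are the real protocol constant, the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper for one TLV option. */
    static uint8_t *put_opt(uint8_t *e, uint8_t tag, const void *val, uint8_t len)
    {
            *e++ = tag;
            *e++ = len;
            memcpy(e, val, len);
            return e + len;
    }

    int main(void)
    {
            uint8_t options[64];
            uint8_t *e = options;
            static const uint8_t cookie[4] = { 99, 130, 83, 99 }; /* RFC 1048 */
            uint8_t msg_type = 1;                 /* DHCPDISCOVER */
            uint8_t req_ip[4] = { 0, 0, 0, 0 };   /* placeholder address */

            memcpy(e, cookie, 4);                 /* magic cookie first */
            e += 4;
            e = put_opt(e, 53, &msg_type, 1);     /* DHCP message type */
            e = put_opt(e, 50, req_ip, 4);        /* requested IP address */
            *e++ = 255;                           /* end-of-options marker */

            printf("encoded %td option bytes\n", e - options);
            return 0;
    }
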
e                 745 net/ipv4/ipmr.c 	struct nlmsgerr *e;
e                 756 net/ipv4/ipmr.c 			e = nlmsg_data(nlh);
e                 757 net/ipv4/ipmr.c 			e->error = -ETIMEDOUT;
e                 758 net/ipv4/ipmr.c 			memset(&e->msg, 0, sizeof(e->msg));
e                1007 net/ipv4/ipmr.c 	struct nlmsgerr *e;
e                1023 net/ipv4/ipmr.c 				e = nlmsg_data(nlh);
e                1024 net/ipv4/ipmr.c 				e->error = -EMSGSIZE;
e                1025 net/ipv4/ipmr.c 				memset(&e->msg, 0, sizeof(e->msg));
e                2836 net/ipv4/ipmr.c 	unsigned int e = 0, s_e;
e                2884 net/ipv4/ipmr.c 			if (e < s_e)
e                2893 net/ipv4/ipmr.c 			e++;
e                2896 net/ipv4/ipmr.c 		e = 0;
e                2905 net/ipv4/ipmr.c 	cb->args[1] = e;
e                 296 net/ipv4/ipmr_base.c 	unsigned int e = 0, s_e = cb->args[1];
e                 305 net/ipv4/ipmr_base.c 		if (e < s_e)
e                 316 net/ipv4/ipmr_base.c 		e++;
e                 321 net/ipv4/ipmr_base.c 		if (e < s_e)
e                 334 net/ipv4/ipmr_base.c 		e++;
e                 339 net/ipv4/ipmr_base.c 	cb->args[1] = e;
e                 165 net/ipv4/netfilter/arp_tables.c arpt_get_target_c(const struct arpt_entry *e)
e                 167 net/ipv4/netfilter/arp_tables.c 	return arpt_get_target((struct arpt_entry *)e);
e                 190 net/ipv4/netfilter/arp_tables.c 	struct arpt_entry *e, **jumpstack;
e                 214 net/ipv4/netfilter/arp_tables.c 	e = get_entry(table_base, private->hook_entry[hook]);
e                 224 net/ipv4/netfilter/arp_tables.c 		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
e                 225 net/ipv4/netfilter/arp_tables.c 			e = arpt_next_entry(e);
e                 229 net/ipv4/netfilter/arp_tables.c 		counter = xt_get_this_cpu_counter(&e->counters);
e                 232 net/ipv4/netfilter/arp_tables.c 		t = arpt_get_target_c(e);
e                 246 net/ipv4/netfilter/arp_tables.c 					e = get_entry(table_base,
e                 249 net/ipv4/netfilter/arp_tables.c 					e = jumpstack[--stackidx];
e                 250 net/ipv4/netfilter/arp_tables.c 					e = arpt_next_entry(e);
e                 255 net/ipv4/netfilter/arp_tables.c 			    != arpt_next_entry(e)) {
e                 260 net/ipv4/netfilter/arp_tables.c 				jumpstack[stackidx++] = e;
e                 263 net/ipv4/netfilter/arp_tables.c 			e = get_entry(table_base, v);
e                 274 net/ipv4/netfilter/arp_tables.c 			e = arpt_next_entry(e);
e                 290 net/ipv4/netfilter/arp_tables.c static inline bool unconditional(const struct arpt_entry *e)
e                 294 net/ipv4/netfilter/arp_tables.c 	return e->target_offset == sizeof(struct arpt_entry) &&
e                 295 net/ipv4/netfilter/arp_tables.c 	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
e                 312 net/ipv4/netfilter/arp_tables.c 		struct arpt_entry *e = entry0 + pos;
e                 318 net/ipv4/netfilter/arp_tables.c 		e->counters.pcnt = pos;
e                 322 net/ipv4/netfilter/arp_tables.c 				= (void *)arpt_get_target_c(e);
e                 323 net/ipv4/netfilter/arp_tables.c 			int visited = e->comefrom & (1 << hook);
e                 325 net/ipv4/netfilter/arp_tables.c 			if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
e                 328 net/ipv4/netfilter/arp_tables.c 			e->comefrom
e                 332 net/ipv4/netfilter/arp_tables.c 			if ((unconditional(e) &&
e                 342 net/ipv4/netfilter/arp_tables.c 					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
e                 344 net/ipv4/netfilter/arp_tables.c 					pos = e->counters.pcnt;
e                 345 net/ipv4/netfilter/arp_tables.c 					e->counters.pcnt = 0;
e                 351 net/ipv4/netfilter/arp_tables.c 					e = entry0 + pos;
e                 352 net/ipv4/netfilter/arp_tables.c 				} while (oldpos == pos + e->next_offset);
e                 355 net/ipv4/netfilter/arp_tables.c 				size = e->next_offset;
e                 356 net/ipv4/netfilter/arp_tables.c 				e = entry0 + pos + size;
e                 359 net/ipv4/netfilter/arp_tables.c 				e->counters.pcnt = pos;
e                 373 net/ipv4/netfilter/arp_tables.c 					newpos = pos + e->next_offset;
e                 377 net/ipv4/netfilter/arp_tables.c 				e = entry0 + newpos;
e                 378 net/ipv4/netfilter/arp_tables.c 				e->counters.pcnt = pos;
e                 387 net/ipv4/netfilter/arp_tables.c static int check_target(struct arpt_entry *e, struct net *net, const char *name)
e                 389 net/ipv4/netfilter/arp_tables.c 	struct xt_entry_target *t = arpt_get_target(e);
e                 393 net/ipv4/netfilter/arp_tables.c 		.entryinfo = e,
e                 396 net/ipv4/netfilter/arp_tables.c 		.hook_mask = e->comefrom,
e                 404 net/ipv4/netfilter/arp_tables.c find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
e                 412 net/ipv4/netfilter/arp_tables.c 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
e                 415 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
e                 424 net/ipv4/netfilter/arp_tables.c 	ret = check_target(e, net, name);
e                 431 net/ipv4/netfilter/arp_tables.c 	xt_percpu_counter_free(&e->counters);
e                 436 net/ipv4/netfilter/arp_tables.c static bool check_underflow(const struct arpt_entry *e)
e                 441 net/ipv4/netfilter/arp_tables.c 	if (!unconditional(e))
e                 443 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target_c(e);
e                 451 net/ipv4/netfilter/arp_tables.c static inline int check_entry_size_and_hooks(struct arpt_entry *e,
e                 462 net/ipv4/netfilter/arp_tables.c 	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
e                 463 net/ipv4/netfilter/arp_tables.c 	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
e                 464 net/ipv4/netfilter/arp_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                 467 net/ipv4/netfilter/arp_tables.c 	if (e->next_offset
e                 471 net/ipv4/netfilter/arp_tables.c 	if (!arp_checkentry(&e->arp))
e                 474 net/ipv4/netfilter/arp_tables.c 	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e                 475 net/ipv4/netfilter/arp_tables.c 				     e->next_offset);
e                 483 net/ipv4/netfilter/arp_tables.c 		if ((unsigned char *)e - base == hook_entries[h])
e                 485 net/ipv4/netfilter/arp_tables.c 		if ((unsigned char *)e - base == underflows[h]) {
e                 486 net/ipv4/netfilter/arp_tables.c 			if (!check_underflow(e))
e                 494 net/ipv4/netfilter/arp_tables.c 	e->counters = ((struct xt_counters) { 0, 0 });
e                 495 net/ipv4/netfilter/arp_tables.c 	e->comefrom = 0;
e                 499 net/ipv4/netfilter/arp_tables.c static void cleanup_entry(struct arpt_entry *e, struct net *net)
e                 504 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
e                 512 net/ipv4/netfilter/arp_tables.c 	xt_percpu_counter_free(&e->counters);
e                 674 net/ipv4/netfilter/arp_tables.c 	const struct arpt_entry *e;
e                 688 net/ipv4/netfilter/arp_tables.c 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
e                 691 net/ipv4/netfilter/arp_tables.c 		e = loc_cpu_entry + off;
e                 692 net/ipv4/netfilter/arp_tables.c 		if (copy_to_user(userptr + off, e, sizeof(*e))) {
e                 704 net/ipv4/netfilter/arp_tables.c 		t = arpt_get_target_c(e);
e                 705 net/ipv4/netfilter/arp_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
e                 735 net/ipv4/netfilter/arp_tables.c static int compat_calc_entry(const struct arpt_entry *e,
e                 744 net/ipv4/netfilter/arp_tables.c 	entry_offset = (void *)e - base;
e                 746 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target_c(e);
e                 755 net/ipv4/netfilter/arp_tables.c 		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
e                 758 net/ipv4/netfilter/arp_tables.c 		    (e < (struct arpt_entry *)(base + info->underflow[i])))
e                1063 net/ipv4/netfilter/arp_tables.c static inline void compat_release_entry(struct compat_arpt_entry *e)
e                1067 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
e                1072 net/ipv4/netfilter/arp_tables.c check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
e                1083 net/ipv4/netfilter/arp_tables.c 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
e                1084 net/ipv4/netfilter/arp_tables.c 	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
e                1085 net/ipv4/netfilter/arp_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                1088 net/ipv4/netfilter/arp_tables.c 	if (e->next_offset < sizeof(struct compat_arpt_entry) +
e                1092 net/ipv4/netfilter/arp_tables.c 	if (!arp_checkentry(&e->arp))
e                1095 net/ipv4/netfilter/arp_tables.c 	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
e                1096 net/ipv4/netfilter/arp_tables.c 					    e->next_offset);
e                1101 net/ipv4/netfilter/arp_tables.c 	entry_offset = (void *)e - (void *)base;
e                1103 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
e                1127 net/ipv4/netfilter/arp_tables.c compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
e                1138 net/ipv4/netfilter/arp_tables.c 	memcpy(de, e, sizeof(struct arpt_entry));
e                1139 net/ipv4/netfilter/arp_tables.c 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
e                1144 net/ipv4/netfilter/arp_tables.c 	de->target_offset = e->target_offset - (origsize - *size);
e                1145 net/ipv4/netfilter/arp_tables.c 	t = compat_arpt_get_target(e);
e                1148 net/ipv4/netfilter/arp_tables.c 	de->next_offset = e->next_offset - (origsize - *size);
e                1321 net/ipv4/netfilter/arp_tables.c static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
e                1334 net/ipv4/netfilter/arp_tables.c 	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
e                1342 net/ipv4/netfilter/arp_tables.c 	target_offset = e->target_offset - (origsize - *size);
e                1344 net/ipv4/netfilter/arp_tables.c 	t = arpt_get_target(e);
e                1348 net/ipv4/netfilter/arp_tables.c 	next_offset = e->next_offset - (origsize - *size);
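
The arp_tables.c check_entry_size_and_hooks() excerpts validate a userspace-supplied blob of variable-length rules: each entry must be properly aligned, its header must fit inside the blob, and next_offset must be at least the minimum entry size before the walk advances; any violation rejects the whole table. A toy validator over an invented 4-byte entry type:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented entry layout; the point is the bounds discipline. */
    struct entry {
            uint16_t next_offset;   /* distance to the next entry */
            uint16_t payload_len;
    };

    static int validate(const unsigned char *base, size_t total)
    {
            const unsigned char *limit = base + total;
            const unsigned char *p = base;

            while (p < limit) {
                    const struct entry *e = (const void *)p;

                    if ((uintptr_t)p % __alignof__(struct entry) != 0 ||
                        p + sizeof(*e) > limit)
                            return -1;             /* misaligned or truncated */
                    if (e->next_offset < sizeof(*e) ||
                        p + e->next_offset > limit)
                            return -1;             /* bogus entry length */
                    p += e->next_offset;
            }
            return 0;
    }

    int main(void)
    {
            /* two 4-byte entries: { next_offset = 4, payload_len = 0 } each */
            uint16_t blob[4] = { 4, 0, 4, 0 };

            printf("%d\n", validate((const unsigned char *)blob, sizeof(blob)));
            return 0;
    }
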
e                 110 net/ipv4/netfilter/ip_tables.c static inline bool unconditional(const struct ipt_entry *e)
e                 114 net/ipv4/netfilter/ip_tables.c 	return e->target_offset == sizeof(struct ipt_entry) &&
e                 115 net/ipv4/netfilter/ip_tables.c 	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
e                 120 net/ipv4/netfilter/ip_tables.c ipt_get_target_c(const struct ipt_entry *e)
e                 122 net/ipv4/netfilter/ip_tables.c 	return ipt_get_target((struct ipt_entry *)e);
e                 158 net/ipv4/netfilter/ip_tables.c get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
e                 168 net/ipv4/netfilter/ip_tables.c 	} else if (s == e) {
e                 194 net/ipv4/netfilter/ip_tables.c 			 const struct ipt_entry *e)
e                 207 net/ipv4/netfilter/ip_tables.c 		if (get_chainname_rulenum(iter, e, hookname,
e                 236 net/ipv4/netfilter/ip_tables.c 	struct ipt_entry *e, **jumpstack;
e                 276 net/ipv4/netfilter/ip_tables.c 	e = get_entry(table_base, private->hook_entry[hook]);
e                 283 net/ipv4/netfilter/ip_tables.c 		WARN_ON(!e);
e                 285 net/ipv4/netfilter/ip_tables.c 		    &e->ip, acpar.fragoff)) {
e                 287 net/ipv4/netfilter/ip_tables.c 			e = ipt_next_entry(e);
e                 291 net/ipv4/netfilter/ip_tables.c 		xt_ematch_foreach(ematch, e) {
e                 298 net/ipv4/netfilter/ip_tables.c 		counter = xt_get_this_cpu_counter(&e->counters);
e                 301 net/ipv4/netfilter/ip_tables.c 		t = ipt_get_target_c(e);
e                 308 net/ipv4/netfilter/ip_tables.c 				     state->out, table->name, private, e);
e                 322 net/ipv4/netfilter/ip_tables.c 					e = get_entry(table_base,
e                 325 net/ipv4/netfilter/ip_tables.c 					e = jumpstack[--stackidx];
e                 326 net/ipv4/netfilter/ip_tables.c 					e = ipt_next_entry(e);
e                 330 net/ipv4/netfilter/ip_tables.c 			if (table_base + v != ipt_next_entry(e) &&
e                 331 net/ipv4/netfilter/ip_tables.c 			    !(e->ip.flags & IPT_F_GOTO)) {
e                 336 net/ipv4/netfilter/ip_tables.c 				jumpstack[stackidx++] = e;
e                 339 net/ipv4/netfilter/ip_tables.c 			e = get_entry(table_base, v);
e                 350 net/ipv4/netfilter/ip_tables.c 			e = ipt_next_entry(e);
e                 378 net/ipv4/netfilter/ip_tables.c 		struct ipt_entry *e = entry0 + pos;
e                 384 net/ipv4/netfilter/ip_tables.c 		e->counters.pcnt = pos;
e                 388 net/ipv4/netfilter/ip_tables.c 				= (void *)ipt_get_target_c(e);
e                 389 net/ipv4/netfilter/ip_tables.c 			int visited = e->comefrom & (1 << hook);
e                 391 net/ipv4/netfilter/ip_tables.c 			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
e                 394 net/ipv4/netfilter/ip_tables.c 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
e                 397 net/ipv4/netfilter/ip_tables.c 			if ((unconditional(e) &&
e                 406 net/ipv4/netfilter/ip_tables.c 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
e                 408 net/ipv4/netfilter/ip_tables.c 					pos = e->counters.pcnt;
e                 409 net/ipv4/netfilter/ip_tables.c 					e->counters.pcnt = 0;
e                 415 net/ipv4/netfilter/ip_tables.c 					e = entry0 + pos;
e                 416 net/ipv4/netfilter/ip_tables.c 				} while (oldpos == pos + e->next_offset);
e                 419 net/ipv4/netfilter/ip_tables.c 				size = e->next_offset;
e                 420 net/ipv4/netfilter/ip_tables.c 				e = entry0 + pos + size;
e                 423 net/ipv4/netfilter/ip_tables.c 				e->counters.pcnt = pos;
e                 437 net/ipv4/netfilter/ip_tables.c 					newpos = pos + e->next_offset;
e                 441 net/ipv4/netfilter/ip_tables.c 				e = entry0 + newpos;
e                 442 net/ipv4/netfilter/ip_tables.c 				e->counters.pcnt = pos;
e                 498 net/ipv4/netfilter/ip_tables.c static int check_target(struct ipt_entry *e, struct net *net, const char *name)
e                 500 net/ipv4/netfilter/ip_tables.c 	struct xt_entry_target *t = ipt_get_target(e);
e                 504 net/ipv4/netfilter/ip_tables.c 		.entryinfo = e,
e                 507 net/ipv4/netfilter/ip_tables.c 		.hook_mask = e->comefrom,
e                 512 net/ipv4/netfilter/ip_tables.c 			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
e                 516 net/ipv4/netfilter/ip_tables.c find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
e                 527 net/ipv4/netfilter/ip_tables.c 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
e                 534 net/ipv4/netfilter/ip_tables.c 	mtpar.entryinfo = &e->ip;
e                 535 net/ipv4/netfilter/ip_tables.c 	mtpar.hook_mask = e->comefrom;
e                 537 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e) {
e                 544 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
e                 553 net/ipv4/netfilter/ip_tables.c 	ret = check_target(e, net, name);
e                 561 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e) {
e                 567 net/ipv4/netfilter/ip_tables.c 	xt_percpu_counter_free(&e->counters);
e                 572 net/ipv4/netfilter/ip_tables.c static bool check_underflow(const struct ipt_entry *e)
e                 577 net/ipv4/netfilter/ip_tables.c 	if (!unconditional(e))
e                 579 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target_c(e);
e                 588 net/ipv4/netfilter/ip_tables.c check_entry_size_and_hooks(struct ipt_entry *e,
e                 599 net/ipv4/netfilter/ip_tables.c 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
e                 600 net/ipv4/netfilter/ip_tables.c 	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
e                 601 net/ipv4/netfilter/ip_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                 604 net/ipv4/netfilter/ip_tables.c 	if (e->next_offset
e                 608 net/ipv4/netfilter/ip_tables.c 	if (!ip_checkentry(&e->ip))
e                 611 net/ipv4/netfilter/ip_tables.c 	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e                 612 net/ipv4/netfilter/ip_tables.c 				     e->next_offset);
e                 620 net/ipv4/netfilter/ip_tables.c 		if ((unsigned char *)e - base == hook_entries[h])
e                 622 net/ipv4/netfilter/ip_tables.c 		if ((unsigned char *)e - base == underflows[h]) {
e                 623 net/ipv4/netfilter/ip_tables.c 			if (!check_underflow(e))
e                 631 net/ipv4/netfilter/ip_tables.c 	e->counters = ((struct xt_counters) { 0, 0 });
e                 632 net/ipv4/netfilter/ip_tables.c 	e->comefrom = 0;
e                 637 net/ipv4/netfilter/ip_tables.c cleanup_entry(struct ipt_entry *e, struct net *net)
e                 644 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e)
e                 646 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
e                 655 net/ipv4/netfilter/ip_tables.c 	xt_percpu_counter_free(&e->counters);
e                 816 net/ipv4/netfilter/ip_tables.c 	const struct ipt_entry *e;
e                 830 net/ipv4/netfilter/ip_tables.c 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
e                 835 net/ipv4/netfilter/ip_tables.c 		e = loc_cpu_entry + off;
e                 836 net/ipv4/netfilter/ip_tables.c 		if (copy_to_user(userptr + off, e, sizeof(*e))) {
e                 849 net/ipv4/netfilter/ip_tables.c 		     i < e->target_offset;
e                 851 net/ipv4/netfilter/ip_tables.c 			m = (void *)e + i;
e                 859 net/ipv4/netfilter/ip_tables.c 		t = ipt_get_target_c(e);
e                 860 net/ipv4/netfilter/ip_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
e                 890 net/ipv4/netfilter/ip_tables.c static int compat_calc_entry(const struct ipt_entry *e,
e                 900 net/ipv4/netfilter/ip_tables.c 	entry_offset = (void *)e - base;
e                 901 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e)
e                 903 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target_c(e);
e                 912 net/ipv4/netfilter/ip_tables.c 		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
e                 915 net/ipv4/netfilter/ip_tables.c 		    (e < (struct ipt_entry *)(base + info->underflow[i])))
e                1218 net/ipv4/netfilter/ip_tables.c compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
e                1231 net/ipv4/netfilter/ip_tables.c 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
e                1239 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e) {
e                1244 net/ipv4/netfilter/ip_tables.c 	target_offset = e->target_offset - (origsize - *size);
e                1245 net/ipv4/netfilter/ip_tables.c 	t = ipt_get_target(e);
e                1249 net/ipv4/netfilter/ip_tables.c 	next_offset = e->next_offset - (origsize - *size);
e                1273 net/ipv4/netfilter/ip_tables.c static void compat_release_entry(struct compat_ipt_entry *e)
e                1279 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e)
e                1281 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
e                1286 net/ipv4/netfilter/ip_tables.c check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
e                1299 net/ipv4/netfilter/ip_tables.c 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
e                1300 net/ipv4/netfilter/ip_tables.c 	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
e                1301 net/ipv4/netfilter/ip_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                1304 net/ipv4/netfilter/ip_tables.c 	if (e->next_offset < sizeof(struct compat_ipt_entry) +
e                1308 net/ipv4/netfilter/ip_tables.c 	if (!ip_checkentry(&e->ip))
e                1311 net/ipv4/netfilter/ip_tables.c 	ret = xt_compat_check_entry_offsets(e, e->elems,
e                1312 net/ipv4/netfilter/ip_tables.c 					    e->target_offset, e->next_offset);
e                1317 net/ipv4/netfilter/ip_tables.c 	entry_offset = (void *)e - (void *)base;
e                1319 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e) {
e                1320 net/ipv4/netfilter/ip_tables.c 		ret = compat_find_calc_match(ematch, &e->ip, &off);
e                1326 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
e                1346 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e) {
e                1355 net/ipv4/netfilter/ip_tables.c compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
e                1367 net/ipv4/netfilter/ip_tables.c 	memcpy(de, e, sizeof(struct ipt_entry));
e                1368 net/ipv4/netfilter/ip_tables.c 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
e                1373 net/ipv4/netfilter/ip_tables.c 	xt_ematch_foreach(ematch, e)
e                1376 net/ipv4/netfilter/ip_tables.c 	de->target_offset = e->target_offset - (origsize - *size);
e                1377 net/ipv4/netfilter/ip_tables.c 	t = compat_ipt_get_target(e);
e                1380 net/ipv4/netfilter/ip_tables.c 	de->next_offset = e->next_offset - (origsize - *size);
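
The ipt_do_table() excerpts drive rule traversal with an explicit jump stack: a verdict that names a table offset pushes the current rule and jumps into a user-defined chain, and RETURN pops back to the rule after the jump site, falling back to the hook's policy when the stack is empty. A toy interpreter over integer verdicts:

    #include <stdio.h>

    enum { V_ACCEPT = -1, V_RETURN = -2, V_FALLTHROUGH = -3 };

    struct rule { int verdict; };   /* >= 0 means "jump to that rule index" */

    static int run(const struct rule *rules, int start)
    {
            int saved[16];             /* like the jumpstack above */
            unsigned int stackidx = 0;
            int i = start;

            for (;;) {
                    int v = rules[i].verdict;

                    if (v == V_ACCEPT)
                            return v;
                    if (v == V_RETURN) {
                            if (stackidx == 0)
                                    return V_ACCEPT;   /* stand-in for hook policy */
                            i = saved[--stackidx] + 1; /* resume after the jump */
                            continue;
                    }
                    if (v >= 0) {
                            saved[stackidx++] = i;     /* remember the jump site */
                            i = v;                     /* enter user chain */
                            continue;
                    }
                    i++;                               /* no verdict: next rule */
            }
    }

    int main(void)
    {
            const struct rule rules[] = {
                    { 3 },             /* 0: jump to chain at index 3 */
                    { V_ACCEPT },      /* 1: verdict after the chain returns */
                    { V_ACCEPT },      /* 2: unused */
                    { V_FALLTHROUGH }, /* 3: chain body */
                    { V_RETURN },      /* 4: pop back to rule 1 */
            };

            printf("verdict %d\n", run(rules, 0));   /* -1: accepted */
            return 0;
    }
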
e                 461 net/ipv4/netfilter/ipt_CLUSTERIP.c 	const struct ipt_entry *e = par->entryinfo;
e                 477 net/ipv4/netfilter/ipt_CLUSTERIP.c 	if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
e                 478 net/ipv4/netfilter/ipt_CLUSTERIP.c 	    e->ip.dst.s_addr == 0) {
e                 495 net/ipv4/netfilter/ipt_CLUSTERIP.c 	config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
e                 499 net/ipv4/netfilter/ipt_CLUSTERIP.c 				&e->ip.dst.s_addr);
e                 503 net/ipv4/netfilter/ipt_CLUSTERIP.c 						       e->ip.dst.s_addr,
e                 504 net/ipv4/netfilter/ipt_CLUSTERIP.c 						       e->ip.iniface);
e                  96 net/ipv4/netfilter/ipt_ECN.c 	const struct ipt_entry *e = par->entryinfo;
e                 105 net/ipv4/netfilter/ipt_ECN.c 	    (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
e                  71 net/ipv4/netfilter/ipt_REJECT.c 	const struct ipt_entry *e = par->entryinfo;
e                  78 net/ipv4/netfilter/ipt_REJECT.c 		if (e->ip.proto != IPPROTO_TCP ||
e                  79 net/ipv4/netfilter/ipt_REJECT.c 		    (e->ip.invflags & XT_INV_PROTO)) {
e                  67 net/ipv4/netfilter/ipt_SYNPROXY.c 	const struct ipt_entry *e = par->entryinfo;
e                  70 net/ipv4/netfilter/ipt_SYNPROXY.c 	if (e->ip.proto != IPPROTO_TCP ||
e                  71 net/ipv4/netfilter/ipt_SYNPROXY.c 	    e->ip.invflags & XT_INV_PROTO)
e                  26 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *e;
e                  28 net/ipv4/tcp_cong.c 	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
e                  29 net/ipv4/tcp_cong.c 		if (strcmp(e->name, name) == 0)
e                  30 net/ipv4/tcp_cong.c 			return e;
e                  56 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *e;
e                  58 net/ipv4/tcp_cong.c 	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
e                  59 net/ipv4/tcp_cong.c 		if (e->key == key)
e                  60 net/ipv4/tcp_cong.c 			return e;
e                  23 net/ipv4/tcp_ulp.c 	struct tcp_ulp_ops *e;
e                  25 net/ipv4/tcp_ulp.c 	list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
e                  26 net/ipv4/tcp_ulp.c 		if (strcmp(e->name, name) == 0)
e                  27 net/ipv4/tcp_ulp.c 			return e;
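
Note: the lookup loops quoted above from tcp_cong.c and tcp_ulp.c are the same pattern twice: walk an RCU-protected list and return the first entry whose name matches. The sketch below is a plain singly linked list standing in for the kernel's list_for_each_entry_rcu() under rcu_read_lock(); only the comparison logic is identical.

#include <stdio.h>
#include <string.h>

struct ops {
	const char *name;
	struct ops *next;
};

static struct ops *find_by_name(struct ops *head, const char *name)
{
	for (struct ops *e = head; e; e = e->next)
		if (strcmp(e->name, name) == 0)
			return e;
	return NULL;	/* caller falls back, e.g. to request_module() */
}

int main(void)
{
	struct ops cubic = { "cubic", NULL };
	struct ops reno  = { "reno", &cubic };

	printf("%s\n", find_by_name(&reno, "cubic") ? "found" : "missing");
	return 0;
}
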
e                  21 net/ipv6/fou6.c static void fou6_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  31 net/ipv6/fou6.c 	uh->dest = e->dport;
e                  34 net/ipv6/fou6.c 	udp6_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM6), skb,
e                  40 net/ipv6/fou6.c static int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  45 net/ipv6/fou6.c 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM6 ?
e                  48 net/ipv6/fou6.c 	err = __fou_build_header(skb, e, protocol, &sport, type);
e                  52 net/ipv6/fou6.c 	fou6_build_udp(skb, e, fl6, protocol, sport);
e                  57 net/ipv6/fou6.c static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
e                  62 net/ipv6/fou6.c 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM6 ?
e                  65 net/ipv6/fou6.c 	err = __gue_build_header(skb, e, protocol, &sport, type);
e                  69 net/ipv6/fou6.c 	fou6_build_udp(skb, e, fl6, protocol, sport);
e                 570 net/ipv6/ip6_fib.c 	unsigned int e = 0, s_e;
e                 636 net/ipv6/ip6_fib.c 		e = 0;
e                 639 net/ipv6/ip6_fib.c 			if (e < s_e)
e                 645 net/ipv6/ip6_fib.c 			e++;
e                 650 net/ipv6/ip6_fib.c 	cb->args[1] = e;
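
Note: the e/s_e counters quoted above from ip6_fib.c implement the netlink dump-resume idiom: a dump may fill its buffer mid-walk, so the loop counts entries, skips those below the saved cursor, and stores the position in cb->args[] for the next invocation. A toy model of that idiom (cursor, emit, and budget are illustrative stand-ins for the callback state and message filling):

#include <stdio.h>

struct cursor { unsigned int s_e; };

static int emit(unsigned int i, unsigned int budget)
{
	return i < budget;	/* pretend the buffer holds `budget` items */
}

static void dump(unsigned int nentries, unsigned int budget, struct cursor *cb)
{
	unsigned int e = 0;

	for (; e < nentries; e++) {
		if (e < cb->s_e)
			continue;	/* already sent by a prior call */
		if (!emit(e - cb->s_e, budget))
			break;		/* buffer full: stop here */
	}
	cb->s_e = e;			/* resume point for the next dump */
}

int main(void)
{
	struct cursor cb = { 0 };

	dump(10, 4, &cb);		/* first call sends 4 entries */
	printf("resume at %u\n", cb.s_e);
	dump(10, 4, &cb);		/* next call picks up at 4 */
	printf("resume at %u\n", cb.s_e);
	return 0;
}
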
e                 135 net/ipv6/netfilter/ip6_tables.c static inline bool unconditional(const struct ip6t_entry *e)
e                 139 net/ipv6/netfilter/ip6_tables.c 	return e->target_offset == sizeof(struct ip6t_entry) &&
e                 140 net/ipv6/netfilter/ip6_tables.c 	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
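
Note: unconditional(), quoted above, relies on two facts: a rule with no matches places its target straight after the fixed header (target_offset == sizeof(struct ip6t_entry)), and a wildcard IPv6 match block is all-zero bytes. A sketch of the memcmp-against-a-zeroed-template half of that test; mock_match is a stand-in for struct ip6t_ip6:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct mock_match {
	unsigned char addr[16];
	unsigned char mask[16];
	unsigned char flags;
};

static bool is_wildcard(const struct mock_match *m)
{
	static const struct mock_match uncond;	/* static: all bytes zero */

	return memcmp(m, &uncond, sizeof(uncond)) == 0;
}

int main(void)
{
	struct mock_match any, one;

	memset(&any, 0, sizeof(any));	/* memset so padding is zero too */
	memset(&one, 0, sizeof(one));
	one.flags = 1;

	printf("%d %d\n", is_wildcard(&any), is_wildcard(&one));
	return 0;
}
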
e                 144 net/ipv6/netfilter/ip6_tables.c ip6t_get_target_c(const struct ip6t_entry *e)
e                 146 net/ipv6/netfilter/ip6_tables.c 	return ip6t_get_target((struct ip6t_entry *)e);
e                 183 net/ipv6/netfilter/ip6_tables.c get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
e                 193 net/ipv6/netfilter/ip6_tables.c 	} else if (s == e) {
e                 219 net/ipv6/netfilter/ip6_tables.c 			 const struct ip6t_entry *e)
e                 232 net/ipv6/netfilter/ip6_tables.c 		if (get_chainname_rulenum(iter, e, hookname,
e                 260 net/ipv6/netfilter/ip6_tables.c 	struct ip6t_entry *e, **jumpstack;
e                 298 net/ipv6/netfilter/ip6_tables.c 	e = get_entry(table_base, private->hook_entry[hook]);
e                 305 net/ipv6/netfilter/ip6_tables.c 		WARN_ON(!e);
e                 307 net/ipv6/netfilter/ip6_tables.c 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
e                 310 net/ipv6/netfilter/ip6_tables.c 			e = ip6t_next_entry(e);
e                 314 net/ipv6/netfilter/ip6_tables.c 		xt_ematch_foreach(ematch, e) {
e                 321 net/ipv6/netfilter/ip6_tables.c 		counter = xt_get_this_cpu_counter(&e->counters);
e                 324 net/ipv6/netfilter/ip6_tables.c 		t = ip6t_get_target_c(e);
e                 331 net/ipv6/netfilter/ip6_tables.c 				     state->out, table->name, private, e);
e                 345 net/ipv6/netfilter/ip6_tables.c 					e = get_entry(table_base,
e                 348 net/ipv6/netfilter/ip6_tables.c 					e = ip6t_next_entry(jumpstack[--stackidx]);
e                 351 net/ipv6/netfilter/ip6_tables.c 			if (table_base + v != ip6t_next_entry(e) &&
e                 352 net/ipv6/netfilter/ip6_tables.c 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
e                 357 net/ipv6/netfilter/ip6_tables.c 				jumpstack[stackidx++] = e;
e                 360 net/ipv6/netfilter/ip6_tables.c 			e = get_entry(table_base, v);
e                 369 net/ipv6/netfilter/ip6_tables.c 			e = ip6t_next_entry(e);
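
Note: the traversal quoted above pushes the current rule onto jumpstack when entering a user-defined chain (but not for GOTO) and pops it when the chain returns. The toy interpreter below models just that call/return mechanism; the rule layout, verdict encoding, and fixed stack depth are mock choices, not the kernel's.

#include <stdio.h>

enum { V_CONTINUE = -1, V_RETURN = -2, V_ACCEPT = 0, V_DROP = 1 };

struct rule {
	int verdict;	/* one of the V_* values above */
	int jump_to;	/* >= 0: call the chain starting at this index */
};

static int run(const struct rule *rules, int start)
{
	int stack[16], depth = 0;
	int pc = start;

	for (;;) {
		const struct rule *r = &rules[pc];

		if (r->jump_to >= 0) {
			if (depth == 16)
				return V_DROP;		/* toy overflow guard */
			stack[depth++] = pc + 1;	/* remember return rule */
			pc = r->jump_to;
		} else if (r->verdict == V_RETURN) {
			if (depth == 0)
				return V_ACCEPT;	/* base chain policy */
			pc = stack[--depth];		/* pop back to caller */
		} else if (r->verdict == V_CONTINUE) {
			pc++;				/* fall through */
		} else {
			return r->verdict;		/* final verdict */
		}
	}
}

int main(void)
{
	const struct rule rules[] = {
		{ V_CONTINUE, 2 },	/* rule 0: call chain at rule 2 */
		{ V_DROP,     -1 },	/* rule 1: reached after RETURN */
		{ V_CONTINUE, -1 },	/* rule 2: no verdict, continue */
		{ V_RETURN,   -1 },	/* rule 3: pop back to rule 1 */
	};

	printf("verdict=%d\n", run(rules, 0));	/* prints verdict=1 (DROP) */
	return 0;
}
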
e                 396 net/ipv6/netfilter/ip6_tables.c 		struct ip6t_entry *e = entry0 + pos;
e                 402 net/ipv6/netfilter/ip6_tables.c 		e->counters.pcnt = pos;
e                 406 net/ipv6/netfilter/ip6_tables.c 				= (void *)ip6t_get_target_c(e);
e                 407 net/ipv6/netfilter/ip6_tables.c 			int visited = e->comefrom & (1 << hook);
e                 409 net/ipv6/netfilter/ip6_tables.c 			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
e                 412 net/ipv6/netfilter/ip6_tables.c 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
e                 415 net/ipv6/netfilter/ip6_tables.c 			if ((unconditional(e) &&
e                 424 net/ipv6/netfilter/ip6_tables.c 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
e                 426 net/ipv6/netfilter/ip6_tables.c 					pos = e->counters.pcnt;
e                 427 net/ipv6/netfilter/ip6_tables.c 					e->counters.pcnt = 0;
e                 433 net/ipv6/netfilter/ip6_tables.c 					e = entry0 + pos;
e                 434 net/ipv6/netfilter/ip6_tables.c 				} while (oldpos == pos + e->next_offset);
e                 437 net/ipv6/netfilter/ip6_tables.c 				size = e->next_offset;
e                 438 net/ipv6/netfilter/ip6_tables.c 				e = entry0 + pos + size;
e                 441 net/ipv6/netfilter/ip6_tables.c 				e->counters.pcnt = pos;
e                 455 net/ipv6/netfilter/ip6_tables.c 					newpos = pos + e->next_offset;
e                 459 net/ipv6/netfilter/ip6_tables.c 				e = entry0 + newpos;
e                 460 net/ipv6/netfilter/ip6_tables.c 				e->counters.pcnt = pos;
e                 516 net/ipv6/netfilter/ip6_tables.c static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
e                 518 net/ipv6/netfilter/ip6_tables.c 	struct xt_entry_target *t = ip6t_get_target(e);
e                 522 net/ipv6/netfilter/ip6_tables.c 		.entryinfo = e,
e                 525 net/ipv6/netfilter/ip6_tables.c 		.hook_mask = e->comefrom,
e                 530 net/ipv6/netfilter/ip6_tables.c 			       e->ipv6.proto,
e                 531 net/ipv6/netfilter/ip6_tables.c 			       e->ipv6.invflags & IP6T_INV_PROTO);
e                 535 net/ipv6/netfilter/ip6_tables.c find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
e                 546 net/ipv6/netfilter/ip6_tables.c 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
e                 553 net/ipv6/netfilter/ip6_tables.c 	mtpar.entryinfo = &e->ipv6;
e                 554 net/ipv6/netfilter/ip6_tables.c 	mtpar.hook_mask = e->comefrom;
e                 556 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e) {
e                 563 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
e                 572 net/ipv6/netfilter/ip6_tables.c 	ret = check_target(e, net, name);
e                 579 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e) {
e                 585 net/ipv6/netfilter/ip6_tables.c 	xt_percpu_counter_free(&e->counters);
e                 590 net/ipv6/netfilter/ip6_tables.c static bool check_underflow(const struct ip6t_entry *e)
e                 595 net/ipv6/netfilter/ip6_tables.c 	if (!unconditional(e))
e                 597 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target_c(e);
e                 606 net/ipv6/netfilter/ip6_tables.c check_entry_size_and_hooks(struct ip6t_entry *e,
e                 617 net/ipv6/netfilter/ip6_tables.c 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
e                 618 net/ipv6/netfilter/ip6_tables.c 	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
e                 619 net/ipv6/netfilter/ip6_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                 622 net/ipv6/netfilter/ip6_tables.c 	if (e->next_offset
e                 626 net/ipv6/netfilter/ip6_tables.c 	if (!ip6_checkentry(&e->ipv6))
e                 629 net/ipv6/netfilter/ip6_tables.c 	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
e                 630 net/ipv6/netfilter/ip6_tables.c 				     e->next_offset);
e                 638 net/ipv6/netfilter/ip6_tables.c 		if ((unsigned char *)e - base == hook_entries[h])
e                 640 net/ipv6/netfilter/ip6_tables.c 		if ((unsigned char *)e - base == underflows[h]) {
e                 641 net/ipv6/netfilter/ip6_tables.c 			if (!check_underflow(e))
e                 649 net/ipv6/netfilter/ip6_tables.c 	e->counters = ((struct xt_counters) { 0, 0 });
e                 650 net/ipv6/netfilter/ip6_tables.c 	e->comefrom = 0;
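
Note: check_entry_size_and_hooks(), quoted above, is a textbook validation of user-supplied variable-length records: alignment, room for the fixed header, a sane next_offset, and the whole record staying inside the buffer. A generic sketch of those checks under mock types (struct rec, rec_ok); the kernel's exact comparison operators differ slightly:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct rec {
	uint16_t next_offset;	/* total length of this record */
};

static bool rec_ok(const unsigned char *base, size_t total,
		   const unsigned char *p)
{
	const unsigned char *limit = base + total;
	const struct rec *r = (const struct rec *)p;

	/* short-circuit order matters: next_offset is only read once the
	 * pointer is aligned and the fixed header is known to fit */
	if ((uintptr_t)p % _Alignof(struct rec) != 0 ||
	    p + sizeof(struct rec) > limit ||
	    r->next_offset < sizeof(struct rec) ||
	    p + r->next_offset > limit)
		return false;
	return true;
}

int main(void)
{
	_Alignas(struct rec) unsigned char buf[64] = { 0 };
	struct rec *r = (struct rec *)buf;

	r->next_offset = 128;	/* claims to run past the 64-byte buffer */
	printf("%s\n", rec_ok(buf, sizeof(buf), buf) ? "ok" : "rejected");
	return 0;
}
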
e                 654 net/ipv6/netfilter/ip6_tables.c static void cleanup_entry(struct ip6t_entry *e, struct net *net)
e                 661 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e)
e                 663 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
e                 672 net/ipv6/netfilter/ip6_tables.c 	xt_percpu_counter_free(&e->counters);
e                 832 net/ipv6/netfilter/ip6_tables.c 	const struct ip6t_entry *e;
e                 846 net/ipv6/netfilter/ip6_tables.c 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
e                 851 net/ipv6/netfilter/ip6_tables.c 		e = loc_cpu_entry + off;
e                 852 net/ipv6/netfilter/ip6_tables.c 		if (copy_to_user(userptr + off, e, sizeof(*e))) {
e                 865 net/ipv6/netfilter/ip6_tables.c 		     i < e->target_offset;
e                 867 net/ipv6/netfilter/ip6_tables.c 			m = (void *)e + i;
e                 875 net/ipv6/netfilter/ip6_tables.c 		t = ip6t_get_target_c(e);
e                 876 net/ipv6/netfilter/ip6_tables.c 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
e                 906 net/ipv6/netfilter/ip6_tables.c static int compat_calc_entry(const struct ip6t_entry *e,
e                 916 net/ipv6/netfilter/ip6_tables.c 	entry_offset = (void *)e - base;
e                 917 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e)
e                 919 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target_c(e);
e                 928 net/ipv6/netfilter/ip6_tables.c 		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
e                 931 net/ipv6/netfilter/ip6_tables.c 		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
e                1234 net/ipv6/netfilter/ip6_tables.c compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
e                1247 net/ipv6/netfilter/ip6_tables.c 	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
e                1255 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e) {
e                1260 net/ipv6/netfilter/ip6_tables.c 	target_offset = e->target_offset - (origsize - *size);
e                1261 net/ipv6/netfilter/ip6_tables.c 	t = ip6t_get_target(e);
e                1265 net/ipv6/netfilter/ip6_tables.c 	next_offset = e->next_offset - (origsize - *size);
e                1289 net/ipv6/netfilter/ip6_tables.c static void compat_release_entry(struct compat_ip6t_entry *e)
e                1295 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e)
e                1297 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
e                1302 net/ipv6/netfilter/ip6_tables.c check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
e                1315 net/ipv6/netfilter/ip6_tables.c 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
e                1316 net/ipv6/netfilter/ip6_tables.c 	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
e                1317 net/ipv6/netfilter/ip6_tables.c 	    (unsigned char *)e + e->next_offset > limit)
e                1320 net/ipv6/netfilter/ip6_tables.c 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
e                1324 net/ipv6/netfilter/ip6_tables.c 	if (!ip6_checkentry(&e->ipv6))
e                1327 net/ipv6/netfilter/ip6_tables.c 	ret = xt_compat_check_entry_offsets(e, e->elems,
e                1328 net/ipv6/netfilter/ip6_tables.c 					    e->target_offset, e->next_offset);
e                1333 net/ipv6/netfilter/ip6_tables.c 	entry_offset = (void *)e - (void *)base;
e                1335 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e) {
e                1336 net/ipv6/netfilter/ip6_tables.c 		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
e                1342 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
e                1362 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e) {
e                1371 net/ipv6/netfilter/ip6_tables.c compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
e                1383 net/ipv6/netfilter/ip6_tables.c 	memcpy(de, e, sizeof(struct ip6t_entry));
e                1384 net/ipv6/netfilter/ip6_tables.c 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
e                1389 net/ipv6/netfilter/ip6_tables.c 	xt_ematch_foreach(ematch, e)
e                1392 net/ipv6/netfilter/ip6_tables.c 	de->target_offset = e->target_offset - (origsize - *size);
e                1393 net/ipv6/netfilter/ip6_tables.c 	t = compat_ip6t_get_target(e);
e                1396 net/ipv6/netfilter/ip6_tables.c 	de->next_offset = e->next_offset - (origsize - *size);
e                  81 net/ipv6/netfilter/ip6t_REJECT.c 	const struct ip6t_entry *e = par->entryinfo;
e                  88 net/ipv6/netfilter/ip6t_REJECT.c 		if (!(e->ipv6.flags & IP6T_F_PROTO) ||
e                  89 net/ipv6/netfilter/ip6t_REJECT.c 		    e->ipv6.proto != IPPROTO_TCP ||
e                  90 net/ipv6/netfilter/ip6t_REJECT.c 		    (e->ipv6.invflags & XT_INV_PROTO)) {
e                  69 net/ipv6/netfilter/ip6t_SYNPROXY.c 	const struct ip6t_entry *e = par->entryinfo;
e                  72 net/ipv6/netfilter/ip6t_SYNPROXY.c 	if (!(e->ipv6.flags & IP6T_F_PROTO) ||
e                  73 net/ipv6/netfilter/ip6t_SYNPROXY.c 	    e->ipv6.proto != IPPROTO_TCP ||
e                  74 net/ipv6/netfilter/ip6t_SYNPROXY.c 	    e->ipv6.invflags & XT_INV_PROTO)
e                3807 net/key/af_key.c 	struct proc_dir_entry *e;
e                3809 net/key/af_key.c 	e = proc_create_net("pfkey", 0, net->proc_net, &pfkey_seq_ops,
e                3811 net/key/af_key.c 	if (e == NULL)
e                  44 net/mpls/af_mpls.c static size_t ipgre_mpls_encap_hlen(struct ip_tunnel_encap *e)
e                  47 net/netfilter/core.c #define nf_entry_dereference(e) \
e                  48 net/netfilter/core.c 	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
e                  52 net/netfilter/core.c 	struct nf_hook_entries *e;
e                  53 net/netfilter/core.c 	size_t alloc = sizeof(*e) +
e                  61 net/netfilter/core.c 	e = kvzalloc(alloc, GFP_KERNEL);
e                  62 net/netfilter/core.c 	if (e)
e                  63 net/netfilter/core.c 		e->num_hook_entries = num;
e                  64 net/netfilter/core.c 	return e;
e                  75 net/netfilter/core.c static void nf_hook_entries_free(struct nf_hook_entries *e)
e                  81 net/netfilter/core.c 	if (!e)
e                  84 net/netfilter/core.c 	num = e->num_hook_entries;
e                  85 net/netfilter/core.c 	ops = nf_hook_entries_get_hook_ops(e);
e                  87 net/netfilter/core.c 	head->allocation = e;
e                 506 net/netfilter/core.c 		 const struct nf_hook_entries *e, unsigned int s)
e                 511 net/netfilter/core.c 	for (; s < e->num_hook_entries; s++) {
e                 512 net/netfilter/core.c 		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
e                 609 net/netfilter/core.c __netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
e                 614 net/netfilter/core.c 		RCU_INIT_POINTER(e[h], NULL);
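
Note: the net/netfilter/core.c allocator quoted above sizes a header-plus-flexible-array object as sizeof(*e) + num * sizeof(element) and records the count in the header. The userspace sketch below substitutes calloc() for kvzalloc() and a generic hooks[] array for the kernel's nf_hook_entry layout:

#include <stdio.h>
#include <stdlib.h>

struct hook_entries {
	unsigned int num_hook_entries;
	void *hooks[];		/* flexible array member */
};

static struct hook_entries *alloc_entries(unsigned int num)
{
	struct hook_entries *e;

	e = calloc(1, sizeof(*e) + num * sizeof(e->hooks[0]));
	if (e)
		e->num_hook_entries = num;
	return e;
}

int main(void)
{
	struct hook_entries *e = alloc_entries(4);

	if (!e)
		return 1;
	printf("%u slots\n", e->num_hook_entries);
	free(e);
	return 0;
}
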
e                 120 net/netfilter/ipset/ip_set_bitmap_gen.h 	const struct mtype_adt_elem *e = value;
e                 121 net/netfilter/ipset/ip_set_bitmap_gen.h 	void *x = get_ext(set, map, e->id);
e                 122 net/netfilter/ipset/ip_set_bitmap_gen.h 	int ret = mtype_do_test(e, map, set->dsize);
e                 134 net/netfilter/ipset/ip_set_bitmap_gen.h 	const struct mtype_adt_elem *e = value;
e                 135 net/netfilter/ipset/ip_set_bitmap_gen.h 	void *x = get_ext(set, map, e->id);
e                 136 net/netfilter/ipset/ip_set_bitmap_gen.h 	int ret = mtype_do_add(e, map, flags, set->dsize);
e                 144 net/netfilter/ipset/ip_set_bitmap_gen.h 			set_bit(e->id, map->members);
e                 155 net/netfilter/ipset/ip_set_bitmap_gen.h 		mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
e                 168 net/netfilter/ipset/ip_set_bitmap_gen.h 	set_bit(e->id, map->members);
e                 179 net/netfilter/ipset/ip_set_bitmap_gen.h 	const struct mtype_adt_elem *e = value;
e                 180 net/netfilter/ipset/ip_set_bitmap_gen.h 	void *x = get_ext(set, map, e->id);
e                 182 net/netfilter/ipset/ip_set_bitmap_gen.h 	if (mtype_do_del(e, map))
e                  67 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
e                  70 net/netfilter/ipset/ip_set_bitmap_ip.c 	return !!test_bit(e->id, map->members);
e                  80 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
e                  83 net/netfilter/ipset/ip_set_bitmap_ip.c 	return !!test_bit(e->id, map->members);
e                  87 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
e                  89 net/netfilter/ipset/ip_set_bitmap_ip.c 	return !test_and_clear_bit(e->id, map->members);
e                 116 net/netfilter/ipset/ip_set_bitmap_ip.c 	struct bitmap_ip_adt_elem e = { .id = 0 };
e                 124 net/netfilter/ipset/ip_set_bitmap_ip.c 	e.id = ip_to_id(map, ip);
e                 126 net/netfilter/ipset/ip_set_bitmap_ip.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 136 net/netfilter/ipset/ip_set_bitmap_ip.c 	struct bitmap_ip_adt_elem e = { .id = 0 };
e                 158 net/netfilter/ipset/ip_set_bitmap_ip.c 		e.id = ip_to_id(map, ip);
e                 159 net/netfilter/ipset/ip_set_bitmap_ip.c 		return adtfn(set, &e, &ext, &ext, flags);
e                 185 net/netfilter/ipset/ip_set_bitmap_ip.c 		e.id = ip_to_id(map, ip);
e                 186 net/netfilter/ipset/ip_set_bitmap_ip.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                  83 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
e                  88 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (!test_bit(e->id, map->members))
e                  90 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	elem = get_const_elem(map->extensions, e->id, dsize);
e                  91 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (e->add_mac && elem->filled == MAC_FILLED)
e                  92 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		return ether_addr_equal(e->ether, elem->ether);
e                 117 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			 const struct bitmap_ipmac_adt_elem *e,
e                 134 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		if (e->add_mac)
e                 143 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
e                 148 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	elem = get_elem(map->extensions, e->id, dsize);
e                 149 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (test_bit(e->id, map->members)) {
e                 151 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			if (e->add_mac &&
e                 153 net/netfilter/ipset/ip_set_bitmap_ipmac.c 			    !ether_addr_equal(e->ether, elem->ether)) {
e                 155 net/netfilter/ipset/ip_set_bitmap_ipmac.c 				clear_bit(e->id, map->members);
e                 157 net/netfilter/ipset/ip_set_bitmap_ipmac.c 				ether_addr_copy(elem->ether, e->ether);
e                 160 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		} else if (!e->add_mac)
e                 164 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		clear_bit(e->id, map->members);
e                 166 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(elem->ether, e->ether);
e                 169 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	} else if (e->add_mac) {
e                 171 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(elem->ether, e->ether);
e                 181 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
e                 184 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	return !test_and_clear_bit(e->id, map->members);
e                 214 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
e                 227 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	e.id = ip_to_id(map, ip);
e                 230 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
e                 232 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
e                 234 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	if (is_zero_ether_addr(e.ether))
e                 237 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 246 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	struct bitmap_ipmac_adt_elem e = { .id = 0 };
e                 268 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	e.id = ip_to_id(map, ip);
e                 272 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e                 273 net/netfilter/ipset/ip_set_bitmap_ipmac.c 		e.add_mac = 1;
e                 275 net/netfilter/ipset/ip_set_bitmap_ipmac.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                  58 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
e                  61 net/netfilter/ipset/ip_set_bitmap_port.c 	return !!test_bit(e->id, map->members);
e                  71 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
e                  74 net/netfilter/ipset/ip_set_bitmap_port.c 	return !!test_bit(e->id, map->members);
e                  78 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
e                  81 net/netfilter/ipset/ip_set_bitmap_port.c 	return !test_and_clear_bit(e->id, map->members);
e                 106 net/netfilter/ipset/ip_set_bitmap_port.c 	struct bitmap_port_adt_elem e = { .id = 0 };
e                 120 net/netfilter/ipset/ip_set_bitmap_port.c 	e.id = port_to_id(map, port);
e                 122 net/netfilter/ipset/ip_set_bitmap_port.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 131 net/netfilter/ipset/ip_set_bitmap_port.c 	struct bitmap_port_adt_elem e = { .id = 0 };
e                 152 net/netfilter/ipset/ip_set_bitmap_port.c 		e.id = port_to_id(map, port);
e                 153 net/netfilter/ipset/ip_set_bitmap_port.c 		return adtfn(set, &e, &ext, &ext, flags);
e                 171 net/netfilter/ipset/ip_set_bitmap_port.c 		e.id = port_to_id(map, port);
e                 172 net/netfilter/ipset/ip_set_bitmap_port.c 		ret = adtfn(set, &e, &ext, &ext, flags);
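
Note: the bitmap_*_do_{test,add,del} helpers quoted above (bitmap:ip, bitmap:ip,mac, bitmap:port) reduce set membership to single bit operations on map->members. Userspace stand-ins for test_bit()/test_and_clear_bit() over a flat word array; id is assumed pre-validated against the bitmap size:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NBITS 256
static uint64_t members[NBITS / 64];

static bool test_bit_(unsigned int id)
{
	return (members[id / 64] >> (id % 64)) & 1;
}

static bool test_and_set_bit_(unsigned int id)
{
	bool old = test_bit_(id);

	members[id / 64] |= (uint64_t)1 << (id % 64);
	return old;	/* "add" reports whether the id was already set */
}

static bool test_and_clear_bit_(unsigned int id)
{
	bool old = test_bit_(id);

	members[id / 64] &= ~((uint64_t)1 << (id % 64));
	return old;	/* "del" fails (false) when nothing was set */
}

int main(void)
{
	printf("add: %d\n", test_and_set_bit_(80));	/* 0: newly added */
	printf("test: %d\n", test_bit_(80));		/* 1: present */
	printf("del: %d\n", test_and_clear_bit_(80));	/* 1: was present */
	return 0;
}
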
e                 453 net/netfilter/ipset/ip_set_core.c 		      const void *e, bool active)
e                 456 net/netfilter/ipset/ip_set_core.c 		unsigned long *timeout = ext_timeout(e, set);
e                 464 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_counter(skb, ext_counter(e, set)))
e                 467 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_comment(skb, ext_comment(e, set)))
e                 470 net/netfilter/ipset/ip_set_core.c 	    ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
e                 876 net/netfilter/ipset/ip_set_hash_gen.h 		u32 e;
e                 884 net/netfilter/ipset/ip_set_hash_gen.h 		for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
e                 885 net/netfilter/ipset/ip_set_hash_gen.h 			elements += t->hregion[e].elements;
e                1353 net/netfilter/ipset/ip_set_hash_gen.h 	const struct mtype_elem *e;
e                1379 net/netfilter/ipset/ip_set_hash_gen.h 			e = ahash_data(n, i, set->dsize);
e                1380 net/netfilter/ipset/ip_set_hash_gen.h 			if (SET_ELEM_EXPIRED(set, e))
e                1383 net/netfilter/ipset/ip_set_hash_gen.h 				 cb->args[IPSET_CB_ARG0], n, i, e);
e                1393 net/netfilter/ipset/ip_set_hash_gen.h 			if (mtype_data_list(skb, e))
e                1395 net/netfilter/ipset/ip_set_hash_gen.h 			if (ip_set_put_extensions(skb, set, e, true))
e                  56 net/netfilter/ipset/ip_set_hash_ip.c hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
e                  58 net/netfilter/ipset/ip_set_hash_ip.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
e                  67 net/netfilter/ipset/ip_set_hash_ip.c hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
e                  69 net/netfilter/ipset/ip_set_hash_ip.c 	next->ip = e->ip;
e                  83 net/netfilter/ipset/ip_set_hash_ip.c 	struct hash_ip4_elem e = { 0 };
e                  92 net/netfilter/ipset/ip_set_hash_ip.c 	e.ip = ip;
e                  93 net/netfilter/ipset/ip_set_hash_ip.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 102 net/netfilter/ipset/ip_set_hash_ip.c 	struct hash_ip4_elem e = { 0 };
e                 122 net/netfilter/ipset/ip_set_hash_ip.c 	e.ip = htonl(ip);
e                 123 net/netfilter/ipset/ip_set_hash_ip.c 	if (e.ip == 0)
e                 127 net/netfilter/ipset/ip_set_hash_ip.c 		return adtfn(set, &e, &ext, &ext, flags);
e                 148 net/netfilter/ipset/ip_set_hash_ip.c 		e.ip = htonl(ip);
e                 151 net/netfilter/ipset/ip_set_hash_ip.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 156 net/netfilter/ipset/ip_set_hash_ip.c 		e.ip = htonl(ip);
e                 157 net/netfilter/ipset/ip_set_hash_ip.c 		if (e.ip == 0)
e                 189 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
e                 191 net/netfilter/ipset/ip_set_hash_ip.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
e                 200 net/netfilter/ipset/ip_set_hash_ip.c hash_ip6_data_next(struct hash_ip6_elem *next, const struct hash_ip6_elem *e)
e                 220 net/netfilter/ipset/ip_set_hash_ip.c 	struct hash_ip6_elem e = { { .all = { 0 } } };
e                 223 net/netfilter/ipset/ip_set_hash_ip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 224 net/netfilter/ipset/ip_set_hash_ip.c 	hash_ip6_netmask(&e.ip, h->netmask);
e                 225 net/netfilter/ipset/ip_set_hash_ip.c 	if (ipv6_addr_any(&e.ip.in6))
e                 228 net/netfilter/ipset/ip_set_hash_ip.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 237 net/netfilter/ipset/ip_set_hash_ip.c 	struct hash_ip6_elem e = { { .all = { 0 } } };
e                 255 net/netfilter/ipset/ip_set_hash_ip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 263 net/netfilter/ipset/ip_set_hash_ip.c 	hash_ip6_netmask(&e.ip, h->netmask);
e                 264 net/netfilter/ipset/ip_set_hash_ip.c 	if (ipv6_addr_any(&e.ip.in6))
e                 267 net/netfilter/ipset/ip_set_hash_ip.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                  59 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac4_data_list(struct sk_buff *skb, const struct hash_ipmac4_elem *e)
e                  61 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip) ||
e                  62 net/netfilter/ipset/ip_set_hash_ipmac.c 	    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
e                  72 net/netfilter/ipset/ip_set_hash_ipmac.c 		      const struct hash_ipmac4_elem *e)
e                  74 net/netfilter/ipset/ip_set_hash_ipmac.c 	next->ip = e->ip;
e                  89 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
e                  97 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
e                  99 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
e                 101 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (is_zero_ether_addr(e.ether))
e                 104 net/netfilter/ipset/ip_set_hash_ipmac.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 106 net/netfilter/ipset/ip_set_hash_ipmac.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 114 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
e                 132 net/netfilter/ipset/ip_set_hash_ipmac.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
e                 136 net/netfilter/ipset/ip_set_hash_ipmac.c 	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e                 137 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (is_zero_ether_addr(e.ether))
e                 140 net/netfilter/ipset/ip_set_hash_ipmac.c 	return adtfn(set, &e, &ext, &ext, flags);
e                 167 net/netfilter/ipset/ip_set_hash_ipmac.c hash_ipmac6_data_list(struct sk_buff *skb, const struct hash_ipmac6_elem *e)
e                 169 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
e                 170 net/netfilter/ipset/ip_set_hash_ipmac.c 	    nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
e                 180 net/netfilter/ipset/ip_set_hash_ipmac.c 		      const struct hash_ipmac6_elem *e)
e                 202 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct hash_ipmac6_elem e = {
e                 213 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
e                 215 net/netfilter/ipset/ip_set_hash_ipmac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
e                 217 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (is_zero_ether_addr(e.ether))
e                 220 net/netfilter/ipset/ip_set_hash_ipmac.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 222 net/netfilter/ipset/ip_set_hash_ipmac.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 230 net/netfilter/ipset/ip_set_hash_ipmac.c 	struct hash_ipmac6_elem e = {
e                 251 net/netfilter/ipset/ip_set_hash_ipmac.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
e                 256 net/netfilter/ipset/ip_set_hash_ipmac.c 	memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
e                 257 net/netfilter/ipset/ip_set_hash_ipmac.c 	if (is_zero_ether_addr(e.ether))
e                 260 net/netfilter/ipset/ip_set_hash_ipmac.c 	return adtfn(set, &e, &ext, &ext, flags);
e                  85 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct hash_ipmark4_elem e = { };
e                  88 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = skb->mark;
e                  89 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark &= h->markmask;
e                  91 net/netfilter/ipset/ip_set_hash_ipmark.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                  92 net/netfilter/ipset/ip_set_hash_ipmark.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 101 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct hash_ipmark4_elem e = { };
e                 113 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
e                 121 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e                 122 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark &= h->markmask;
e                 126 net/netfilter/ipset/ip_set_hash_ipmark.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 130 net/netfilter/ipset/ip_set_hash_ipmark.c 	ip_to = ip = ntohl(e.ip);
e                 148 net/netfilter/ipset/ip_set_hash_ipmark.c 		e.ip = htonl(ip);
e                 149 net/netfilter/ipset/ip_set_hash_ipmark.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 211 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct hash_ipmark6_elem e = { };
e                 214 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = skb->mark;
e                 215 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark &= h->markmask;
e                 217 net/netfilter/ipset/ip_set_hash_ipmark.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 218 net/netfilter/ipset/ip_set_hash_ipmark.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 227 net/netfilter/ipset/ip_set_hash_ipmark.c 	struct hash_ipmark6_elem e = { };
e                 246 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 254 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e                 255 net/netfilter/ipset/ip_set_hash_ipmark.c 	e.mark &= h->markmask;
e                 258 net/netfilter/ipset/ip_set_hash_ipmark.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 262 net/netfilter/ipset/ip_set_hash_ipmark.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                  92 net/netfilter/ipset/ip_set_hash_ipport.c 	struct hash_ipport4_elem e = { .ip = 0 };
e                  96 net/netfilter/ipset/ip_set_hash_ipport.c 				 &e.port, &e.proto))
e                  99 net/netfilter/ipset/ip_set_hash_ipport.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 100 net/netfilter/ipset/ip_set_hash_ipport.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 109 net/netfilter/ipset/ip_set_hash_ipport.c 	struct hash_ipport4_elem e = { .ip = 0 };
e                 123 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
e                 131 net/netfilter/ipset/ip_set_hash_ipport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 134 net/netfilter/ipset/ip_set_hash_ipport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 135 net/netfilter/ipset/ip_set_hash_ipport.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 137 net/netfilter/ipset/ip_set_hash_ipport.c 		if (e.proto == 0)
e                 143 net/netfilter/ipset/ip_set_hash_ipport.c 	if (!(with_ports || e.proto == IPPROTO_ICMP))
e                 144 net/netfilter/ipset/ip_set_hash_ipport.c 		e.port = 0;
e                 149 net/netfilter/ipset/ip_set_hash_ipport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 153 net/netfilter/ipset/ip_set_hash_ipport.c 	ip_to = ip = ntohl(e.ip);
e                 168 net/netfilter/ipset/ip_set_hash_ipport.c 	port_to = port = ntohs(e.port);
e                 181 net/netfilter/ipset/ip_set_hash_ipport.c 			e.ip = htonl(ip);
e                 182 net/netfilter/ipset/ip_set_hash_ipport.c 			e.port = htons(p);
e                 183 net/netfilter/ipset/ip_set_hash_ipport.c 			ret = adtfn(set, &e, &ext, &ext, flags);
e                 250 net/netfilter/ipset/ip_set_hash_ipport.c 	struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
e                 254 net/netfilter/ipset/ip_set_hash_ipport.c 				 &e.port, &e.proto))
e                 257 net/netfilter/ipset/ip_set_hash_ipport.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 258 net/netfilter/ipset/ip_set_hash_ipport.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 267 net/netfilter/ipset/ip_set_hash_ipport.c 	struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
e                 289 net/netfilter/ipset/ip_set_hash_ipport.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 297 net/netfilter/ipset/ip_set_hash_ipport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 300 net/netfilter/ipset/ip_set_hash_ipport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 301 net/netfilter/ipset/ip_set_hash_ipport.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 303 net/netfilter/ipset/ip_set_hash_ipport.c 		if (e.proto == 0)
e                 309 net/netfilter/ipset/ip_set_hash_ipport.c 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e                 310 net/netfilter/ipset/ip_set_hash_ipport.c 		e.port = 0;
e                 313 net/netfilter/ipset/ip_set_hash_ipport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 317 net/netfilter/ipset/ip_set_hash_ipport.c 	port = ntohs(e.port);
e                 325 net/netfilter/ipset/ip_set_hash_ipport.c 		e.port = htons(port);
e                 326 net/netfilter/ipset/ip_set_hash_ipport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                  94 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct hash_ipportip4_elem e = { .ip = 0 };
e                  98 net/netfilter/ipset/ip_set_hash_ipportip.c 				 &e.port, &e.proto))
e                 101 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 102 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
e                 103 net/netfilter/ipset/ip_set_hash_ipportip.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 112 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct hash_ipportip4_elem e = { .ip = 0 };
e                 126 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
e                 134 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);
e                 138 net/netfilter/ipset/ip_set_hash_ipportip.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 141 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 142 net/netfilter/ipset/ip_set_hash_ipportip.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 144 net/netfilter/ipset/ip_set_hash_ipportip.c 		if (e.proto == 0)
e                 150 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (!(with_ports || e.proto == IPPROTO_ICMP))
e                 151 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.port = 0;
e                 156 net/netfilter/ipset/ip_set_hash_ipportip.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 160 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip_to = ip = ntohl(e.ip);
e                 175 net/netfilter/ipset/ip_set_hash_ipportip.c 	port_to = port = ntohs(e.port);
e                 188 net/netfilter/ipset/ip_set_hash_ipportip.c 			e.ip = htonl(ip);
e                 189 net/netfilter/ipset/ip_set_hash_ipportip.c 			e.port = htons(p);
e                 190 net/netfilter/ipset/ip_set_hash_ipportip.c 			ret = adtfn(set, &e, &ext, &ext, flags);
e                 260 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } };
e                 264 net/netfilter/ipset/ip_set_hash_ipportip.c 				 &e.port, &e.proto))
e                 267 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 268 net/netfilter/ipset/ip_set_hash_ipportip.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
e                 269 net/netfilter/ipset/ip_set_hash_ipportip.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 278 net/netfilter/ipset/ip_set_hash_ipportip.c 	struct hash_ipportip6_elem e = {  .ip = { .all = { 0 } } };
e                 300 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 308 net/netfilter/ipset/ip_set_hash_ipportip.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
e                 312 net/netfilter/ipset/ip_set_hash_ipportip.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 315 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 316 net/netfilter/ipset/ip_set_hash_ipportip.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 318 net/netfilter/ipset/ip_set_hash_ipportip.c 		if (e.proto == 0)
e                 324 net/netfilter/ipset/ip_set_hash_ipportip.c 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e                 325 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.port = 0;
e                 328 net/netfilter/ipset/ip_set_hash_ipportip.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 332 net/netfilter/ipset/ip_set_hash_ipportip.c 	port = ntohs(e.port);
e                 340 net/netfilter/ipset/ip_set_hash_ipportip.c 		e.port = htons(port);
e                 341 net/netfilter/ipset/ip_set_hash_ipportip.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 139 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct hash_ipportnet4_elem e = {
e                 145 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.cidr = HOST_MASK - 1;
e                 148 net/netfilter/ipset/ip_set_hash_ipportnet.c 				 &e.port, &e.proto))
e                 151 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 152 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2);
e                 153 net/netfilter/ipset/ip_set_hash_ipportnet.c 	e.ip2 &= ip_set_netmask(e.cidr + 1);
e                 155 net/netfilter/ipset/ip_set_hash_ipportnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 164 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
e                 197 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.cidr = cidr - 1;
e                 200 net/netfilter/ipset/ip_set_hash_ipportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 203 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 204 net/netfilter/ipset/ip_set_hash_ipportnet.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 206 net/netfilter/ipset/ip_set_hash_ipportnet.c 		if (e.proto == 0)
e                 212 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (!(with_ports || e.proto == IPPROTO_ICMP))
e                 213 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.port = 0;
e                 226 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.ip = htonl(ip);
e                 227 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
e                 228 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 248 net/netfilter/ipset/ip_set_hash_ipportnet.c 	port_to = port = ntohs(e.port);
e                 265 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
e                 277 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.ip = htonl(ip);
e                 279 net/netfilter/ipset/ip_set_hash_ipportnet.c 			e.port = htons(p);
e                 281 net/netfilter/ipset/ip_set_hash_ipportnet.c 				e.ip2 = htonl(ip2);
e                 283 net/netfilter/ipset/ip_set_hash_ipportnet.c 				e.cidr = cidr - 1;
e                 284 net/netfilter/ipset/ip_set_hash_ipportnet.c 				ret = adtfn(set, &e, &ext, &ext, flags);
e                 390 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct hash_ipportnet6_elem e = {
e                 396 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.cidr = HOST_MASK - 1;
e                 399 net/netfilter/ipset/ip_set_hash_ipportnet.c 				 &e.port, &e.proto))
e                 402 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 403 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6);
e                 404 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6_netmask(&e.ip2, e.cidr + 1);
e                 406 net/netfilter/ipset/ip_set_hash_ipportnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 415 net/netfilter/ipset/ip_set_hash_ipportnet.c 	struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
e                 439 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 447 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2);
e                 455 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.cidr = cidr - 1;
e                 458 net/netfilter/ipset/ip_set_hash_ipportnet.c 	ip6_netmask(&e.ip2, e.cidr + 1);
e                 460 net/netfilter/ipset/ip_set_hash_ipportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 463 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 464 net/netfilter/ipset/ip_set_hash_ipportnet.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 466 net/netfilter/ipset/ip_set_hash_ipportnet.c 		if (e.proto == 0)
e                 472 net/netfilter/ipset/ip_set_hash_ipportnet.c 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e                 473 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.port = 0;
e                 483 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 488 net/netfilter/ipset/ip_set_hash_ipportnet.c 	port = ntohs(e.port);
e                 496 net/netfilter/ipset/ip_set_hash_ipportnet.c 		e.port = htons(port);
e                 497 net/netfilter/ipset/ip_set_hash_ipportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                  49 net/netfilter/ipset/ip_set_hash_mac.c hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
e                  51 net/netfilter/ipset/ip_set_hash_mac.c 	if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
e                  61 net/netfilter/ipset/ip_set_hash_mac.c 		    const struct hash_mac4_elem *e)
e                  77 net/netfilter/ipset/ip_set_hash_mac.c 	struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
e                  85 net/netfilter/ipset/ip_set_hash_mac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
e                  87 net/netfilter/ipset/ip_set_hash_mac.c 		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
e                  89 net/netfilter/ipset/ip_set_hash_mac.c 	if (is_zero_ether_addr(e.ether))
e                  91 net/netfilter/ipset/ip_set_hash_mac.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                  99 net/netfilter/ipset/ip_set_hash_mac.c 	struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
e                 113 net/netfilter/ipset/ip_set_hash_mac.c 	ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));
e                 114 net/netfilter/ipset/ip_set_hash_mac.c 	if (is_zero_ether_addr(e.ether))
e                 117 net/netfilter/ipset/ip_set_hash_mac.c 	return adtfn(set, &e, &ext, &ext, flags);
e                 118 net/netfilter/ipset/ip_set_hash_net.c 	struct hash_net4_elem e = {
e                 123 net/netfilter/ipset/ip_set_hash_net.c 	if (e.cidr == 0)
e                 126 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = HOST_MASK;
e                 128 net/netfilter/ipset/ip_set_hash_net.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 129 net/netfilter/ipset/ip_set_hash_net.c 	e.ip &= ip_set_netmask(e.cidr);
e                 131 net/netfilter/ipset/ip_set_hash_net.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 140 net/netfilter/ipset/ip_set_hash_net.c 	struct hash_net4_elem e = { .cidr = HOST_MASK };
e                 161 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 162 net/netfilter/ipset/ip_set_hash_net.c 		if (!e.cidr || e.cidr > HOST_MASK)
e                 174 net/netfilter/ipset/ip_set_hash_net.c 		e.ip = htonl(ip & ip_set_hostmask(e.cidr));
e                 175 net/netfilter/ipset/ip_set_hash_net.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 193 net/netfilter/ipset/ip_set_hash_net.c 		e.ip = htonl(ip);
e                 194 net/netfilter/ipset/ip_set_hash_net.c 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
e                 195 net/netfilter/ipset/ip_set_hash_net.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 286 net/netfilter/ipset/ip_set_hash_net.c 	struct hash_net6_elem e = {
e                 291 net/netfilter/ipset/ip_set_hash_net.c 	if (e.cidr == 0)
e                 294 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = HOST_MASK;
e                 296 net/netfilter/ipset/ip_set_hash_net.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 297 net/netfilter/ipset/ip_set_hash_net.c 	ip6_netmask(&e.ip, e.cidr);
e                 299 net/netfilter/ipset/ip_set_hash_net.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 307 net/netfilter/ipset/ip_set_hash_net.c 	struct hash_net6_elem e = { .cidr = HOST_MASK };
e                 320 net/netfilter/ipset/ip_set_hash_net.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 329 net/netfilter/ipset/ip_set_hash_net.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 330 net/netfilter/ipset/ip_set_hash_net.c 		if (!e.cidr || e.cidr > HOST_MASK)
e                 334 net/netfilter/ipset/ip_set_hash_net.c 	ip6_netmask(&e.ip, e.cidr);
e                 343 net/netfilter/ipset/ip_set_hash_net.c 	ret = adtfn(set, &e, &ext, &ext, flags);
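
Note: the hash:net add path quoted above converts an arbitrary from..to IPv4 range into a series of CIDR blocks via ip_set_range_to_cidr(): pick the largest power-of-two block aligned on `from` that still fits below `to`, emit it, advance. A hedged plain-C reimplementation of that classic decomposition; the kernel version also interacts with the set's per-CIDR bookkeeping, which this sketch omits.

#include <stdio.h>
#include <stdint.h>

static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t prefix = 32;

	/* widen the block while it stays aligned on `from` and below `to` */
	while (prefix > 0) {
		uint64_t size = 1ull << (32 - (prefix - 1));

		if ((from & (uint32_t)(size - 1)) != 0)
			break;		/* wider block not aligned here */
		if (from + size - 1 > to)
			break;		/* wider block overshoots the end */
		prefix--;
	}
	*cidr = prefix;
	return from + (uint32_t)(1ull << (32 - prefix));  /* next start */
}

int main(void)
{
	uint32_t from = 10, to = 20, next;
	uint8_t cidr;

	do {
		next = range_to_cidr(from, to, &cidr);
		printf("%u/%u\n", from, cidr);	/* 10/31 12/30 16/30 20/32 */
		from = next;
	} while (from && from <= to);	/* from == 0 would mean wrap-around */
	return 0;
}
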
e                 157 net/netfilter/ipset/ip_set_hash_netiface.c 	struct hash_netiface4_elem e = {
e                 164 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = HOST_MASK;
e                 166 net/netfilter/ipset/ip_set_hash_netiface.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 167 net/netfilter/ipset/ip_set_hash_netiface.c 	e.ip &= ip_set_netmask(e.cidr);
e                 179 net/netfilter/ipset/ip_set_hash_netiface.c 		STRLCPY(e.iface, eiface);
e                 180 net/netfilter/ipset/ip_set_hash_netiface.c 		e.physdev = 1;
e                 183 net/netfilter/ipset/ip_set_hash_netiface.c 		STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
e                 186 net/netfilter/ipset/ip_set_hash_netiface.c 	if (strlen(e.iface) == 0)
e                 188 net/netfilter/ipset/ip_set_hash_netiface.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 197 net/netfilter/ipset/ip_set_hash_netiface.c 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
e                 219 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 220 net/netfilter/ipset/ip_set_hash_netiface.c 		if (e.cidr > HOST_MASK)
e                 223 net/netfilter/ipset/ip_set_hash_netiface.c 	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
e                 229 net/netfilter/ipset/ip_set_hash_netiface.c 			e.physdev = 1;
e                 234 net/netfilter/ipset/ip_set_hash_netiface.c 		e.ip = htonl(ip & ip_set_hostmask(e.cidr));
e                 235 net/netfilter/ipset/ip_set_hash_netiface.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 249 net/netfilter/ipset/ip_set_hash_netiface.c 		ip_set_mask_from_to(ip, ip_to, e.cidr);
e                 255 net/netfilter/ipset/ip_set_hash_netiface.c 		e.ip = htonl(ip);
e                 256 net/netfilter/ipset/ip_set_hash_netiface.c 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
e                 257 net/netfilter/ipset/ip_set_hash_netiface.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 367 net/netfilter/ipset/ip_set_hash_netiface.c 	struct hash_netiface6_elem e = {
e                 374 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = HOST_MASK;
e                 376 net/netfilter/ipset/ip_set_hash_netiface.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 377 net/netfilter/ipset/ip_set_hash_netiface.c 	ip6_netmask(&e.ip, e.cidr);
e                 386 net/netfilter/ipset/ip_set_hash_netiface.c 		STRLCPY(e.iface, eiface);
e                 387 net/netfilter/ipset/ip_set_hash_netiface.c 		e.physdev = 1;
e                 390 net/netfilter/ipset/ip_set_hash_netiface.c 		STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out));
e                 393 net/netfilter/ipset/ip_set_hash_netiface.c 	if (strlen(e.iface) == 0)
e                 396 net/netfilter/ipset/ip_set_hash_netiface.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 404 net/netfilter/ipset/ip_set_hash_netiface.c 	struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
e                 418 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 427 net/netfilter/ipset/ip_set_hash_netiface.c 		e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 428 net/netfilter/ipset/ip_set_hash_netiface.c 		if (e.cidr > HOST_MASK)
e                 432 net/netfilter/ipset/ip_set_hash_netiface.c 	ip6_netmask(&e.ip, e.cidr);
e                 434 net/netfilter/ipset/ip_set_hash_netiface.c 	nla_strlcpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ);
e                 440 net/netfilter/ipset/ip_set_hash_netiface.c 			e.physdev = 1;
e                 445 net/netfilter/ipset/ip_set_hash_netiface.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                 132 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet4_init(struct hash_netnet4_elem *e)
e                 134 net/netfilter/ipset/ip_set_hash_netnet.c 	e->cidr[0] = HOST_MASK;
e                 135 net/netfilter/ipset/ip_set_hash_netnet.c 	e->cidr[1] = HOST_MASK;
e                 145 net/netfilter/ipset/ip_set_hash_netnet.c 	struct hash_netnet4_elem e = { };
e                 148 net/netfilter/ipset/ip_set_hash_netnet.c 	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e                 149 net/netfilter/ipset/ip_set_hash_netnet.c 	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e                 151 net/netfilter/ipset/ip_set_hash_netnet.c 		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
e                 153 net/netfilter/ipset/ip_set_hash_netnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
e                 154 net/netfilter/ipset/ip_set_hash_netnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
e                 155 net/netfilter/ipset/ip_set_hash_netnet.c 	e.ip[0] &= ip_set_netmask(e.cidr[0]);
e                 156 net/netfilter/ipset/ip_set_hash_netnet.c 	e.ip[1] &= ip_set_netmask(e.cidr[1]);
e                 158 net/netfilter/ipset/ip_set_hash_netnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 167 net/netfilter/ipset/ip_set_hash_netnet.c 	struct hash_netnet4_elem e = { };
e                 176 net/netfilter/ipset/ip_set_hash_netnet.c 	hash_netnet4_init(&e);
e                 194 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 195 net/netfilter/ipset/ip_set_hash_netnet.c 		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
e                 200 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
e                 201 net/netfilter/ipset/ip_set_hash_netnet.c 		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
e                 214 net/netfilter/ipset/ip_set_hash_netnet.c 		e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
e                 215 net/netfilter/ipset/ip_set_hash_netnet.c 		e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
e                 216 net/netfilter/ipset/ip_set_hash_netnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 231 net/netfilter/ipset/ip_set_hash_netnet.c 		ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
e                 244 net/netfilter/ipset/ip_set_hash_netnet.c 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
e                 255 net/netfilter/ipset/ip_set_hash_netnet.c 		e.ip[0] = htonl(ip);
e                 256 net/netfilter/ipset/ip_set_hash_netnet.c 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
e                 258 net/netfilter/ipset/ip_set_hash_netnet.c 			e.ip[1] = htonl(ip2);
e                 259 net/netfilter/ipset/ip_set_hash_netnet.c 			ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
e                 260 net/netfilter/ipset/ip_set_hash_netnet.c 			ret = adtfn(set, &e, &ext, &ext, flags);
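
The hash:net,net add path above loops with ip_set_range_to_cidr(), carving an arbitrary from..to range into the largest aligned CIDR blocks. A minimal userspace sketch of that carving, assuming host-byte-order u32 addresses; the helper name and the main() driver are illustrative, not the kernel's:

    /* Emit the largest aligned CIDR block starting at "from" that still
     * fits inside [from, to]; return the start of the next block. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
    {
        uint8_t bits = 32;
        uint32_t size = 1;

        while (bits > 0) {
            uint32_t bigger = size << 1;

            if (from & (bigger - 1))        /* alignment broken */
                break;
            if (from + (bigger - 1) < from) /* u32 overflow */
                break;
            if (from + (bigger - 1) > to)   /* overshoots the range */
                break;
            size = bigger;
            bits--;
        }
        *cidr = bits;
        return from + size;                 /* next block start */
    }

    int main(void)
    {
        uint32_t from = 0xc0a80001, to = 0xc0a800ff; /* 192.168.0.1-.255 */
        uint8_t cidr;

        while (from && from <= to) {
            uint32_t next = range_to_cidr(from, to, &cidr);
            printf("%u.%u.%u.%u/%u\n", from >> 24, (from >> 16) & 0xff,
                   (from >> 8) & 0xff, from & 0xff, cidr);
            from = next;
        }
        return 0;
    }

The "from && ..." guard stops the walk if the next start wraps past 255.255.255.255, mirroring why the kernel loop can terminate on a zero return.
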
e                 366 net/netfilter/ipset/ip_set_hash_netnet.c hash_netnet6_init(struct hash_netnet6_elem *e)
e                 368 net/netfilter/ipset/ip_set_hash_netnet.c 	e->cidr[0] = HOST_MASK;
e                 369 net/netfilter/ipset/ip_set_hash_netnet.c 	e->cidr[1] = HOST_MASK;
e                 379 net/netfilter/ipset/ip_set_hash_netnet.c 	struct hash_netnet6_elem e = { };
e                 382 net/netfilter/ipset/ip_set_hash_netnet.c 	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e                 383 net/netfilter/ipset/ip_set_hash_netnet.c 	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e                 385 net/netfilter/ipset/ip_set_hash_netnet.c 		e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
e                 387 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
e                 388 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
e                 389 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6_netmask(&e.ip[0], e.cidr[0]);
e                 390 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6_netmask(&e.ip[1], e.cidr[1]);
e                 392 net/netfilter/ipset/ip_set_hash_netnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 400 net/netfilter/ipset/ip_set_hash_netnet.c 	struct hash_netnet6_elem e = { };
e                 407 net/netfilter/ipset/ip_set_hash_netnet.c 	hash_netnet6_init(&e);
e                 414 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
e                 418 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
e                 427 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 428 net/netfilter/ipset/ip_set_hash_netnet.c 		if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
e                 433 net/netfilter/ipset/ip_set_hash_netnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
e                 434 net/netfilter/ipset/ip_set_hash_netnet.c 		if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
e                 438 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6_netmask(&e.ip[0], e.cidr[0]);
e                 439 net/netfilter/ipset/ip_set_hash_netnet.c 	ip6_netmask(&e.ip[1], e.cidr[1]);
e                 448 net/netfilter/ipset/ip_set_hash_netnet.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                 134 net/netfilter/ipset/ip_set_hash_netport.c 	struct hash_netport4_elem e = {
e                 140 net/netfilter/ipset/ip_set_hash_netport.c 		e.cidr = HOST_MASK - 1;
e                 143 net/netfilter/ipset/ip_set_hash_netport.c 				 &e.port, &e.proto))
e                 146 net/netfilter/ipset/ip_set_hash_netport.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
e                 147 net/netfilter/ipset/ip_set_hash_netport.c 	e.ip &= ip_set_netmask(e.cidr + 1);
e                 149 net/netfilter/ipset/ip_set_hash_netport.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 158 net/netfilter/ipset/ip_set_hash_netport.c 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
e                 186 net/netfilter/ipset/ip_set_hash_netport.c 		e.cidr = cidr - 1;
e                 189 net/netfilter/ipset/ip_set_hash_netport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 192 net/netfilter/ipset/ip_set_hash_netport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 193 net/netfilter/ipset/ip_set_hash_netport.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 195 net/netfilter/ipset/ip_set_hash_netport.c 		if (e.proto == 0)
e                 201 net/netfilter/ipset/ip_set_hash_netport.c 	if (!(with_ports || e.proto == IPPROTO_ICMP))
e                 202 net/netfilter/ipset/ip_set_hash_netport.c 		e.port = 0;
e                 214 net/netfilter/ipset/ip_set_hash_netport.c 		e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1));
e                 215 net/netfilter/ipset/ip_set_hash_netport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 220 net/netfilter/ipset/ip_set_hash_netport.c 	port = port_to = ntohs(e.port);
e                 235 net/netfilter/ipset/ip_set_hash_netport.c 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
e                 245 net/netfilter/ipset/ip_set_hash_netport.c 		e.ip = htonl(ip);
e                 247 net/netfilter/ipset/ip_set_hash_netport.c 		e.cidr = cidr - 1;
e                 249 net/netfilter/ipset/ip_set_hash_netport.c 			e.port = htons(p);
e                 250 net/netfilter/ipset/ip_set_hash_netport.c 			ret = adtfn(set, &e, &ext, &ext, flags);
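
The hash:net,port lines above store the prefix length biased by one (hence "e.cidr + 1") and mask the address with ip_set_netmask() before insertion. A sketch of that masking, assuming the usual "high cidr bits set" mask; helper names are illustrative:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hostmask(uint8_t cidr)  /* host byte order */
    {
        return cidr ? ~0U << (32 - cidr) : 0;   /* guard the UB shift */
    }

    static uint32_t netmask(uint8_t cidr)   /* network byte order */
    {
        return htonl(hostmask(cidr));
    }

    int main(void)
    {
        uint8_t stored_cidr = 23;                 /* a /24 stored as 23 */
        uint32_t ip = inet_addr("192.168.1.77");  /* network byte order */
        struct in_addr a;

        ip &= netmask(stored_cidr + 1);           /* -> 192.168.1.0 */
        a.s_addr = ip;
        printf("%s/%u\n", inet_ntoa(a), stored_cidr + 1);
        return 0;
    }
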
e                 350 net/netfilter/ipset/ip_set_hash_netport.c 	struct hash_netport6_elem e = {
e                 356 net/netfilter/ipset/ip_set_hash_netport.c 		e.cidr = HOST_MASK - 1;
e                 359 net/netfilter/ipset/ip_set_hash_netport.c 				 &e.port, &e.proto))
e                 362 net/netfilter/ipset/ip_set_hash_netport.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
e                 363 net/netfilter/ipset/ip_set_hash_netport.c 	ip6_netmask(&e.ip, e.cidr + 1);
e                 365 net/netfilter/ipset/ip_set_hash_netport.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 374 net/netfilter/ipset/ip_set_hash_netport.c 	struct hash_netport6_elem e = { .cidr = HOST_MASK  - 1 };
e                 392 net/netfilter/ipset/ip_set_hash_netport.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
e                 404 net/netfilter/ipset/ip_set_hash_netport.c 		e.cidr = cidr - 1;
e                 406 net/netfilter/ipset/ip_set_hash_netport.c 	ip6_netmask(&e.ip, e.cidr + 1);
e                 408 net/netfilter/ipset/ip_set_hash_netport.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 411 net/netfilter/ipset/ip_set_hash_netport.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 412 net/netfilter/ipset/ip_set_hash_netport.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 414 net/netfilter/ipset/ip_set_hash_netport.c 		if (e.proto == 0)
e                 420 net/netfilter/ipset/ip_set_hash_netport.c 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e                 421 net/netfilter/ipset/ip_set_hash_netport.c 		e.port = 0;
e                 431 net/netfilter/ipset/ip_set_hash_netport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 436 net/netfilter/ipset/ip_set_hash_netport.c 	port = ntohs(e.port);
e                 444 net/netfilter/ipset/ip_set_hash_netport.c 		e.port = htons(port);
e                 445 net/netfilter/ipset/ip_set_hash_netport.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 142 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet4_init(struct hash_netportnet4_elem *e)
e                 144 net/netfilter/ipset/ip_set_hash_netportnet.c 	e->cidr[0] = HOST_MASK;
e                 145 net/netfilter/ipset/ip_set_hash_netportnet.c 	e->cidr[1] = HOST_MASK;
e                 155 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct hash_netportnet4_elem e = { };
e                 158 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e                 159 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e                 161 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
e                 164 net/netfilter/ipset/ip_set_hash_netportnet.c 				 &e.port, &e.proto))
e                 167 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
e                 168 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
e                 169 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.ip[0] &= ip_set_netmask(e.cidr[0]);
e                 170 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.ip[1] &= ip_set_netmask(e.cidr[1]);
e                 172 net/netfilter/ipset/ip_set_hash_netportnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 181 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct hash_netportnet4_elem e = { };
e                 191 net/netfilter/ipset/ip_set_hash_netportnet.c 	hash_netportnet4_init(&e);
e                 211 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 212 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.cidr[0] > HOST_MASK)
e                 217 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
e                 218 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.cidr[1] > HOST_MASK)
e                 222 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 225 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 226 net/netfilter/ipset/ip_set_hash_netportnet.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 228 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.proto == 0)
e                 234 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (!(with_ports || e.proto == IPPROTO_ICMP))
e                 235 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.port = 0;
e                 247 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
e                 248 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
e                 249 net/netfilter/ipset/ip_set_hash_netportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 264 net/netfilter/ipset/ip_set_hash_netportnet.c 		ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
e                 267 net/netfilter/ipset/ip_set_hash_netportnet.c 	port_to = port = ntohs(e.port);
e                 284 net/netfilter/ipset/ip_set_hash_netportnet.c 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
e                 297 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.ip[0] = htonl(ip);
e                 298 net/netfilter/ipset/ip_set_hash_netportnet.c 		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
e                 300 net/netfilter/ipset/ip_set_hash_netportnet.c 			e.port = htons(p);
e                 302 net/netfilter/ipset/ip_set_hash_netportnet.c 				e.ip[1] = htonl(ip2);
e                 304 net/netfilter/ipset/ip_set_hash_netportnet.c 							   &e.cidr[1]);
e                 305 net/netfilter/ipset/ip_set_hash_netportnet.c 				ret = adtfn(set, &e, &ext, &ext, flags);
e                 421 net/netfilter/ipset/ip_set_hash_netportnet.c hash_netportnet6_init(struct hash_netportnet6_elem *e)
e                 423 net/netfilter/ipset/ip_set_hash_netportnet.c 	e->cidr[0] = HOST_MASK;
e                 424 net/netfilter/ipset/ip_set_hash_netportnet.c 	e->cidr[1] = HOST_MASK;
e                 434 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct hash_netportnet6_elem e = { };
e                 437 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.cidr[0] = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK);
e                 438 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.cidr[1] = INIT_CIDR(h->nets[0].cidr[1], HOST_MASK);
e                 440 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
e                 443 net/netfilter/ipset/ip_set_hash_netportnet.c 				 &e.port, &e.proto))
e                 446 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
e                 447 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
e                 448 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6_netmask(&e.ip[0], e.cidr[0]);
e                 449 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6_netmask(&e.ip[1], e.cidr[1]);
e                 451 net/netfilter/ipset/ip_set_hash_netportnet.c 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
e                 460 net/netfilter/ipset/ip_set_hash_netportnet.c 	struct hash_netportnet6_elem e = { };
e                 469 net/netfilter/ipset/ip_set_hash_netportnet.c 	hash_netportnet6_init(&e);
e                 478 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
e                 482 net/netfilter/ipset/ip_set_hash_netportnet.c 	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
e                 491 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
e                 492 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.cidr[0] > HOST_MASK)
e                 497 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
e                 498 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.cidr[1] > HOST_MASK)
e                 502 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6_netmask(&e.ip[0], e.cidr[0]);
e                 503 net/netfilter/ipset/ip_set_hash_netportnet.c 	ip6_netmask(&e.ip[1], e.cidr[1]);
e                 505 net/netfilter/ipset/ip_set_hash_netportnet.c 	e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
e                 508 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
e                 509 net/netfilter/ipset/ip_set_hash_netportnet.c 		with_ports = ip_set_proto_with_ports(e.proto);
e                 511 net/netfilter/ipset/ip_set_hash_netportnet.c 		if (e.proto == 0)
e                 517 net/netfilter/ipset/ip_set_hash_netportnet.c 	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
e                 518 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.port = 0;
e                 528 net/netfilter/ipset/ip_set_hash_netportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                 533 net/netfilter/ipset/ip_set_hash_netportnet.c 	port = ntohs(e.port);
e                 541 net/netfilter/ipset/ip_set_hash_netportnet.c 		e.port = htons(port);
e                 542 net/netfilter/ipset/ip_set_hash_netportnet.c 		ret = adtfn(set, &e, &ext, &ext, flags);
e                  55 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e;
e                  63 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_rcu(e, &map->members, list) {
e                  64 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_test(e->id, skb, par, opt);
e                  67 net/netfilter/ipset/ip_set_list_set.c 		if (ip_set_match_extensions(set, ext, mext, flags, e))
e                  79 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e;
e                  82 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry(e, &map->members, list) {
e                  84 net/netfilter/ipset/ip_set_list_set.c 		    ip_set_timeout_expired(ext_timeout(e, set)))
e                  86 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_add(e->id, skb, par, opt);
e                  99 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e;
e                 102 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry(e, &map->members, list) {
e                 104 net/netfilter/ipset/ip_set_list_set.c 		    ip_set_timeout_expired(ext_timeout(e, set)))
e                 106 net/netfilter/ipset/ip_set_list_set.c 		ret = ip_set_del(e->id, skb, par, opt);
e                 145 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e = container_of(rcu, struct set_elem, rcu);
e                 146 net/netfilter/ipset/ip_set_list_set.c 	struct ip_set *set = e->set;
e                 148 net/netfilter/ipset/ip_set_list_set.c 	ip_set_ext_destroy(set, e);
e                 149 net/netfilter/ipset/ip_set_list_set.c 	kfree(e);
e                 153 net/netfilter/ipset/ip_set_list_set.c list_set_del(struct ip_set *set, struct set_elem *e)
e                 158 net/netfilter/ipset/ip_set_list_set.c 	list_del_rcu(&e->list);
e                 159 net/netfilter/ipset/ip_set_list_set.c 	ip_set_put_byindex(map->net, e->id);
e                 160 net/netfilter/ipset/ip_set_list_set.c 	call_rcu(&e->rcu, __list_set_del_rcu);
e                 164 net/netfilter/ipset/ip_set_list_set.c list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
e                 168 net/netfilter/ipset/ip_set_list_set.c 	list_replace_rcu(&old->list, &e->list);
e                 177 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *n;
e                 179 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_safe(e, n, &map->members, list)
e                 180 net/netfilter/ipset/ip_set_list_set.c 		if (ip_set_timeout_expired(ext_timeout(e, set)))
e                 181 net/netfilter/ipset/ip_set_list_set.c 			list_set_del(set, e);
e                 190 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *next, *prev = NULL;
e                 193 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry(e, &map->members, list) {
e                 195 net/netfilter/ipset/ip_set_list_set.c 		    ip_set_timeout_expired(ext_timeout(e, set)))
e                 197 net/netfilter/ipset/ip_set_list_set.c 		else if (e->id != d->id) {
e                 198 net/netfilter/ipset/ip_set_list_set.c 			prev = e;
e                 205 net/netfilter/ipset/ip_set_list_set.c 			next = list_next_entry(e, list);
e                 206 net/netfilter/ipset/ip_set_list_set.c 			ret = !list_is_last(&e->list, &map->members) &&
e                 218 net/netfilter/ipset/ip_set_list_set.c 			 struct set_elem *e)
e                 221 net/netfilter/ipset/ip_set_list_set.c 		ip_set_init_counter(ext_counter(e, set), ext);
e                 223 net/netfilter/ipset/ip_set_list_set.c 		ip_set_init_comment(set, ext_comment(e, set), ext);
e                 225 net/netfilter/ipset/ip_set_list_set.c 		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
e                 228 net/netfilter/ipset/ip_set_list_set.c 		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
e                 237 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *n, *prev, *next;
e                 242 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry(e, &map->members, list) {
e                 244 net/netfilter/ipset/ip_set_list_set.c 		    ip_set_timeout_expired(ext_timeout(e, set)))
e                 246 net/netfilter/ipset/ip_set_list_set.c 		else if (d->id == e->id)
e                 247 net/netfilter/ipset/ip_set_list_set.c 			n = e;
e                 248 net/netfilter/ipset/ip_set_list_set.c 		else if (d->before == 0 || e->id != d->refid)
e                 251 net/netfilter/ipset/ip_set_list_set.c 			next = e;
e                 253 net/netfilter/ipset/ip_set_list_set.c 			prev = e;
e                 293 net/netfilter/ipset/ip_set_list_set.c 	e = kzalloc(set->dsize, GFP_ATOMIC);
e                 294 net/netfilter/ipset/ip_set_list_set.c 	if (!e)
e                 296 net/netfilter/ipset/ip_set_list_set.c 	e->id = d->id;
e                 297 net/netfilter/ipset/ip_set_list_set.c 	e->set = set;
e                 298 net/netfilter/ipset/ip_set_list_set.c 	INIT_LIST_HEAD(&e->list);
e                 299 net/netfilter/ipset/ip_set_list_set.c 	list_set_init_extensions(set, ext, e);
e                 301 net/netfilter/ipset/ip_set_list_set.c 		list_set_replace(set, e, n);
e                 303 net/netfilter/ipset/ip_set_list_set.c 		list_add_tail_rcu(&e->list, &next->list);
e                 305 net/netfilter/ipset/ip_set_list_set.c 		list_add_rcu(&e->list, &prev->list);
e                 307 net/netfilter/ipset/ip_set_list_set.c 		list_add_tail_rcu(&e->list, &map->members);
e                 319 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *next, *prev = NULL;
e                 321 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry(e, &map->members, list) {
e                 323 net/netfilter/ipset/ip_set_list_set.c 		    ip_set_timeout_expired(ext_timeout(e, set)))
e                 325 net/netfilter/ipset/ip_set_list_set.c 		else if (e->id != d->id) {
e                 326 net/netfilter/ipset/ip_set_list_set.c 			prev = e;
e                 331 net/netfilter/ipset/ip_set_list_set.c 			next = list_next_entry(e, list);
e                 332 net/netfilter/ipset/ip_set_list_set.c 			if (list_is_last(&e->list, &map->members) ||
e                 339 net/netfilter/ipset/ip_set_list_set.c 		list_set_del(set, e);
e                 351 net/netfilter/ipset/ip_set_list_set.c 	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
e                 366 net/netfilter/ipset/ip_set_list_set.c 	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
e                 367 net/netfilter/ipset/ip_set_list_set.c 	if (e.id == IPSET_INVALID_ID)
e                 378 net/netfilter/ipset/ip_set_list_set.c 		e.before = f & IPSET_FLAG_BEFORE;
e                 381 net/netfilter/ipset/ip_set_list_set.c 	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
e                 387 net/netfilter/ipset/ip_set_list_set.c 		e.refid = ip_set_get_byname(map->net,
e                 390 net/netfilter/ipset/ip_set_list_set.c 		if (e.refid == IPSET_INVALID_ID) {
e                 394 net/netfilter/ipset/ip_set_list_set.c 		if (!e.before)
e                 395 net/netfilter/ipset/ip_set_list_set.c 			e.before = -1;
e                 400 net/netfilter/ipset/ip_set_list_set.c 	ret = adtfn(set, &e, &ext, &ext, flags);
e                 403 net/netfilter/ipset/ip_set_list_set.c 	if (e.refid != IPSET_INVALID_ID)
e                 404 net/netfilter/ipset/ip_set_list_set.c 		ip_set_put_byindex(map->net, e.refid);
e                 406 net/netfilter/ipset/ip_set_list_set.c 		ip_set_put_byindex(map->net, e.id);
e                 415 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *n;
e                 417 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_safe(e, n, &map->members, list)
e                 418 net/netfilter/ipset/ip_set_list_set.c 		list_set_del(set, e);
e                 427 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e, *n;
e                 432 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_safe(e, n, &map->members, list) {
e                 433 net/netfilter/ipset/ip_set_list_set.c 		list_del(&e->list);
e                 434 net/netfilter/ipset/ip_set_list_set.c 		ip_set_put_byindex(map->net, e->id);
e                 435 net/netfilter/ipset/ip_set_list_set.c 		ip_set_ext_destroy(set, e);
e                 436 net/netfilter/ipset/ip_set_list_set.c 		kfree(e);
e                 447 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e;
e                 451 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_rcu(e, &map->members, list)
e                 490 net/netfilter/ipset/ip_set_list_set.c 	struct set_elem *e;
e                 498 net/netfilter/ipset/ip_set_list_set.c 	list_for_each_entry_rcu(e, &map->members, list) {
e                 501 net/netfilter/ipset/ip_set_list_set.c 		     ip_set_timeout_expired(ext_timeout(e, set)))) {
e                 508 net/netfilter/ipset/ip_set_list_set.c 		ip_set_name_byindex(map->net, e->id, name);
e                 511 net/netfilter/ipset/ip_set_list_set.c 		if (ip_set_put_extensions(skb, set, e, true))
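
The list:set teardown above iterates with list_for_each_entry_safe(), which caches the successor so the current node can be unlinked and freed mid-walk. A minimal userspace equivalent of that pattern, assuming a hand-rolled singly linked list rather than the kernel's struct list_head:

    #include <stdlib.h>

    struct elem {
        int id;
        struct elem *next;
    };

    static void destroy_all(struct elem **head)
    {
        struct elem *e = *head, *n;

        while (e) {
            n = e->next;    /* grab the successor before freeing */
            free(e);        /* "e" must not be touched after this */
            e = n;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct elem *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct elem *e = malloc(sizeof(*e));
            if (!e)
                break;
            e->id = i;
            e->next = head;
            head = e;
        }
        destroy_all(&head);
        return 0;
    }

The RCU variant in the listing defers the kfree() to a grace period via call_rcu(); the traversal-safety idea is the same.
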
e                 546 net/netfilter/ipvs/ip_vs_app.c 	struct list_head *e;
e                 557 net/netfilter/ipvs/ip_vs_app.c 	if ((e = inc->a_list.next) != &app->incs_list)
e                 558 net/netfilter/ipvs/ip_vs_app.c 		return list_entry(e, struct ip_vs_app, a_list);
e                 561 net/netfilter/ipvs/ip_vs_app.c 	for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
e                 562 net/netfilter/ipvs/ip_vs_app.c 		app = list_entry(e, struct ip_vs_app, a_list);
e                1048 net/netfilter/ipvs/ip_vs_conn.c 	struct hlist_node *e;
e                1057 net/netfilter/ipvs/ip_vs_conn.c 	e = rcu_dereference(hlist_next_rcu(&cp->c_list));
e                1058 net/netfilter/ipvs/ip_vs_conn.c 	if (e)
e                1059 net/netfilter/ipvs/ip_vs_conn.c 		return hlist_entry(e, struct ip_vs_conn, c_list);
e                2063 net/netfilter/ipvs/ip_vs_ctl.c 	struct hlist_node *e;
e                2076 net/netfilter/ipvs/ip_vs_ctl.c 		e = rcu_dereference(hlist_next_rcu(&svc->s_list));
e                2077 net/netfilter/ipvs/ip_vs_ctl.c 		if (e)
e                2078 net/netfilter/ipvs/ip_vs_ctl.c 			return hlist_entry(e, struct ip_vs_service, s_list);
e                2094 net/netfilter/ipvs/ip_vs_ctl.c 	e = rcu_dereference(hlist_next_rcu(&svc->f_list));
e                2095 net/netfilter/ipvs/ip_vs_ctl.c 	if (e)
e                2096 net/netfilter/ipvs/ip_vs_ctl.c 		return hlist_entry(e, struct ip_vs_service, f_list);
e                  98 net/netfilter/ipvs/ip_vs_est.c 	struct ip_vs_estimator *e;
e                 104 net/netfilter/ipvs/ip_vs_est.c 	list_for_each_entry(e, &ipvs->est_list, list) {
e                 105 net/netfilter/ipvs/ip_vs_est.c 		s = container_of(e, struct ip_vs_stats, est);
e                 111 net/netfilter/ipvs/ip_vs_est.c 		rate = (s->kstats.conns - e->last_conns) << 9;
e                 112 net/netfilter/ipvs/ip_vs_est.c 		e->last_conns = s->kstats.conns;
e                 113 net/netfilter/ipvs/ip_vs_est.c 		e->cps += ((s64)rate - (s64)e->cps) >> 2;
e                 115 net/netfilter/ipvs/ip_vs_est.c 		rate = (s->kstats.inpkts - e->last_inpkts) << 9;
e                 116 net/netfilter/ipvs/ip_vs_est.c 		e->last_inpkts = s->kstats.inpkts;
e                 117 net/netfilter/ipvs/ip_vs_est.c 		e->inpps += ((s64)rate - (s64)e->inpps) >> 2;
e                 119 net/netfilter/ipvs/ip_vs_est.c 		rate = (s->kstats.outpkts - e->last_outpkts) << 9;
e                 120 net/netfilter/ipvs/ip_vs_est.c 		e->last_outpkts = s->kstats.outpkts;
e                 121 net/netfilter/ipvs/ip_vs_est.c 		e->outpps += ((s64)rate - (s64)e->outpps) >> 2;
e                 124 net/netfilter/ipvs/ip_vs_est.c 		rate = (s->kstats.inbytes - e->last_inbytes) << 4;
e                 125 net/netfilter/ipvs/ip_vs_est.c 		e->last_inbytes = s->kstats.inbytes;
e                 126 net/netfilter/ipvs/ip_vs_est.c 		e->inbps += ((s64)rate - (s64)e->inbps) >> 2;
e                 128 net/netfilter/ipvs/ip_vs_est.c 		rate = (s->kstats.outbytes - e->last_outbytes) << 4;
e                 129 net/netfilter/ipvs/ip_vs_est.c 		e->last_outbytes = s->kstats.outbytes;
e                 130 net/netfilter/ipvs/ip_vs_est.c 		e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
e                 178 net/netfilter/ipvs/ip_vs_est.c 	struct ip_vs_estimator *e = &stats->est;
e                 180 net/netfilter/ipvs/ip_vs_est.c 	dst->cps = (e->cps + 0x1FF) >> 10;
e                 181 net/netfilter/ipvs/ip_vs_est.c 	dst->inpps = (e->inpps + 0x1FF) >> 10;
e                 182 net/netfilter/ipvs/ip_vs_est.c 	dst->outpps = (e->outpps + 0x1FF) >> 10;
e                 183 net/netfilter/ipvs/ip_vs_est.c 	dst->inbps = (e->inbps + 0xF) >> 5;
e                 184 net/netfilter/ipvs/ip_vs_est.c 	dst->outbps = (e->outbps + 0xF) >> 5;
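
The ip_vs_est lines above are a fixed-point EWMA run on a 2-second tick: the per-interval delta shifted left by 9 is a per-second rate in 10-bit fixed point (delta*512 == (delta/2)*1024), the estimate moves a quarter of the way toward it, and the readout rounds back with (+0x1FF) >> 10. A compilable sketch of the conns/s path under that 2 s assumption:

    #include <stdint.h>
    #include <stdio.h>

    struct est {
        uint64_t last_conns;
        int64_t cps;        /* conns/s in <<10 fixed point */
    };

    static void est_tick(struct est *e, uint64_t conns) /* every 2 s */
    {
        int64_t rate = (int64_t)(conns - e->last_conns) << 9;

        e->last_conns = conns;
        e->cps += (rate - e->cps) >> 2;     /* EWMA, alpha = 1/4 */
    }

    static uint64_t est_read(const struct est *e)
    {
        return (e->cps + 0x1FF) >> 10;      /* round to conns/s */
    }

    int main(void)
    {
        struct est e = { 0, 0 };
        uint64_t total = 0;

        for (int i = 0; i < 20; i++) {
            total += 200;                   /* steady 100 conns/s */
            est_tick(&e, total);
        }
        printf("estimated: %llu conns/s\n",
               (unsigned long long)est_read(&e));
        return 0;
    }

After twenty ticks the estimate has converged to within rounding of 100, so this prints 100; the byte counters in the listing use <<4 and >>5 for the same scheme at coarser precision.
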
e                 102 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e;
e                 105 net/netfilter/ipvs/ip_vs_lblcr.c 		list_for_each_entry(e, &set->list, list) {
e                 106 net/netfilter/ipvs/ip_vs_lblcr.c 			if (e->dest == dest)
e                 111 net/netfilter/ipvs/ip_vs_lblcr.c 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
e                 112 net/netfilter/ipvs/ip_vs_lblcr.c 	if (e == NULL)
e                 116 net/netfilter/ipvs/ip_vs_lblcr.c 	e->dest = dest;
e                 118 net/netfilter/ipvs/ip_vs_lblcr.c 	list_add_rcu(&e->list, &set->list);
e                 126 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e;
e                 128 net/netfilter/ipvs/ip_vs_lblcr.c 	e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
e                 129 net/netfilter/ipvs/ip_vs_lblcr.c 	ip_vs_dest_put_and_free(e->dest);
e                 130 net/netfilter/ipvs/ip_vs_lblcr.c 	kfree(e);
e                 136 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e;
e                 138 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry(e, &set->list, list) {
e                 139 net/netfilter/ipvs/ip_vs_lblcr.c 		if (e->dest == dest) {
e                 143 net/netfilter/ipvs/ip_vs_lblcr.c 			list_del_rcu(&e->list);
e                 144 net/netfilter/ipvs/ip_vs_lblcr.c 			call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
e                 152 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e, *ep;
e                 154 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry_safe(e, ep, &set->list, list) {
e                 155 net/netfilter/ipvs/ip_vs_lblcr.c 		list_del_rcu(&e->list);
e                 156 net/netfilter/ipvs/ip_vs_lblcr.c 		call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
e                 163 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e;
e                 168 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry_rcu(e, &set->list, list) {
e                 169 net/netfilter/ipvs/ip_vs_lblcr.c 		least = e->dest;
e                 183 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry_continue_rcu(e, &set->list, list) {
e                 184 net/netfilter/ipvs/ip_vs_lblcr.c 		dest = e->dest;
e                 212 net/netfilter/ipvs/ip_vs_lblcr.c 	struct ip_vs_dest_set_elem *e;
e                 220 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry(e, &set->list, list) {
e                 221 net/netfilter/ipvs/ip_vs_lblcr.c 		most = e->dest;
e                 231 net/netfilter/ipvs/ip_vs_lblcr.c 	list_for_each_entry_continue(e, &set->list, list) {
e                 232 net/netfilter/ipvs/ip_vs_lblcr.c 		dest = e->dest;
e                  52 net/netfilter/nf_conntrack_ecache.c 		struct nf_conntrack_ecache *e;
e                  57 net/netfilter/nf_conntrack_ecache.c 		e = nf_ct_ecache_find(ct);
e                  58 net/netfilter/nf_conntrack_ecache.c 		if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
e                  66 net/netfilter/nf_conntrack_ecache.c 		e->state = NFCT_ECACHE_DESTROY_SENT;
e                 126 net/netfilter/nf_conntrack_ecache.c 	struct nf_conntrack_ecache *e;
e                 133 net/netfilter/nf_conntrack_ecache.c 	e = nf_ct_ecache_find(ct);
e                 134 net/netfilter/nf_conntrack_ecache.c 	if (!e)
e                 140 net/netfilter/nf_conntrack_ecache.c 			.portid	= e->portid ? e->portid : portid,
e                 144 net/netfilter/nf_conntrack_ecache.c 		unsigned long missed = e->portid ? 0 : e->missed;
e                 146 net/netfilter/nf_conntrack_ecache.c 		if (!((eventmask | missed) & e->ctmask))
e                 158 net/netfilter/nf_conntrack_ecache.c 					if (e->portid == 0 && portid != 0)
e                 159 net/netfilter/nf_conntrack_ecache.c 						e->portid = portid;
e                 160 net/netfilter/nf_conntrack_ecache.c 					e->state = NFCT_ECACHE_DESTROY_FAIL;
e                 162 net/netfilter/nf_conntrack_ecache.c 					e->missed |= eventmask;
e                 165 net/netfilter/nf_conntrack_ecache.c 				e->missed &= ~missed;
e                 183 net/netfilter/nf_conntrack_ecache.c 	struct nf_conntrack_ecache *e;
e                 192 net/netfilter/nf_conntrack_ecache.c 	e = nf_ct_ecache_find(ct);
e                 193 net/netfilter/nf_conntrack_ecache.c 	if (e == NULL)
e                 196 net/netfilter/nf_conntrack_ecache.c 	events = xchg(&e->cache, 0);
e                 204 net/netfilter/nf_conntrack_ecache.c 	missed = e->missed;
e                 206 net/netfilter/nf_conntrack_ecache.c 	if (!((events | missed) & e->ctmask))
e                 220 net/netfilter/nf_conntrack_ecache.c 		e->missed |= events;
e                 222 net/netfilter/nf_conntrack_ecache.c 		e->missed &= ~missed;
e                 237 net/netfilter/nf_conntrack_ecache.c 	struct nf_conntrack_ecache *e;
e                 244 net/netfilter/nf_conntrack_ecache.c 	e = nf_ct_ecache_find(exp->master);
e                 245 net/netfilter/nf_conntrack_ecache.c 	if (!e)
e                 248 net/netfilter/nf_conntrack_ecache.c 	if (e->expmask & (1 << event)) {
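
The conntrack ecache lines above accumulate undelivered event bits in e->missed and fold them into the next delivery attempt. A reduced userspace model of that bookkeeping, which ignores the destroy-only retry policy and the locking; deliver() is a stand-in for the netlink broadcast:

    #include <stdbool.h>
    #include <stdio.h>

    struct ecache {
        unsigned long missed;   /* events whose delivery failed */
        unsigned long ctmask;   /* events anyone subscribed to */
    };

    static bool deliver(unsigned long events)
    {
        printf("deliver %#lx\n", events);   /* pretend it succeeds */
        return true;
    }

    static void notify(struct ecache *e, unsigned long events)
    {
        unsigned long missed = e->missed;

        if (!((events | missed) & e->ctmask))
            return;                 /* nobody listening */
        if (!deliver(events | missed))
            e->missed |= events;    /* keep for the next attempt */
        else
            e->missed &= ~missed;   /* replayed successfully */
    }

    int main(void)
    {
        struct ecache e = { 0, ~0UL };

        notify(&e, 1UL << 0);
        return 0;
    }
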
e                 490 net/netfilter/nf_conntrack_expect.c void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
e                 515 net/netfilter/nf_conntrack_expect.c 			      bool (*iter)(struct nf_conntrack_expect *e, void *data),
e                 155 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 159 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 161 net/netfilter/nf_flow_table_core.c 		nf_ct_delete(e->ct, 0, 0);
e                 162 net/netfilter/nf_flow_table_core.c 	nf_ct_put(e->ct);
e                 163 net/netfilter/nf_flow_table_core.c 	kfree_rcu(e, rcu_head);
e                 235 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 244 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 245 net/netfilter/nf_flow_table_core.c 	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
e                 248 net/netfilter/nf_flow_table_core.c 		flow_offload_fixup_ct(e->ct);
e                 250 net/netfilter/nf_flow_table_core.c 		flow_offload_fixup_ct_timeout(e->ct);
e                 257 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 261 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 262 net/netfilter/nf_flow_table_core.c 	flow_offload_fixup_ct_state(e->ct);
e                 272 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 285 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 286 net/netfilter/nf_flow_table_core.c 	if (unlikely(nf_ct_is_dying(e->ct)))
e                 330 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 332 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 333 net/netfilter/nf_flow_table_core.c 	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
e                 488 net/netfilter/nf_flow_table_core.c 	struct flow_offload_entry *e;
e                 490 net/netfilter/nf_flow_table_core.c 	e = container_of(flow, struct flow_offload_entry, flow);
e                 496 net/netfilter/nf_flow_table_core.c 	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
e                 753 net/netfilter/nf_nat_core.c 			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
e                 757 net/netfilter/nf_nat_core.c 			if (!e)
e                 760 net/netfilter/nf_nat_core.c 			for (i = 0; i < e->num_hook_entries; i++) {
e                 761 net/netfilter/nf_nat_core.c 				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
e                 128 net/netfilter/nf_tables_core.c 	unsigned long e = (unsigned long)expr->ops->eval;
e                 129 net/netfilter/nf_tables_core.c #define X(e, fun) \
e                 130 net/netfilter/nf_tables_core.c 	do { if ((e) == (unsigned long)(fun)) \
e                 133 net/netfilter/nf_tables_core.c 	X(e, nft_payload_eval);
e                 134 net/netfilter/nf_tables_core.c 	X(e, nft_cmp_eval);
e                 135 net/netfilter/nf_tables_core.c 	X(e, nft_meta_get_eval);
e                 136 net/netfilter/nf_tables_core.c 	X(e, nft_lookup_eval);
e                 137 net/netfilter/nf_tables_core.c 	X(e, nft_range_eval);
e                 138 net/netfilter/nf_tables_core.c 	X(e, nft_immediate_eval);
e                 139 net/netfilter/nf_tables_core.c 	X(e, nft_byteorder_eval);
e                 140 net/netfilter/nf_tables_core.c 	X(e, nft_dynset_eval);
e                 141 net/netfilter/nf_tables_core.c 	X(e, nft_rt_get_eval);
e                 142 net/netfilter/nf_tables_core.c 	X(e, nft_bitwise_eval);
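
The X() macro above compares the stored eval pointer against each known evaluator and calls the match directly, turning expr->ops->eval from an indirect call into a chain of direct ones (cheaper under retpolines). A reduced userspace model of the same trick:

    #include <stdio.h>

    static void eval_a(void) { puts("a"); }
    static void eval_b(void) { puts("b"); }

    static void dispatch(void (*fn)(void))
    {
        unsigned long e = (unsigned long)fn;

    #define X(e, fun) \
        do { if ((e) == (unsigned long)(fun)) { fun(); return; } } while (0)
        X(e, eval_a);
        X(e, eval_b);
    #undef X
        fn();   /* unknown evaluator: fall back to the indirect call */
    }

    int main(void)
    {
        dispatch(eval_a);
        dispatch(eval_b);
        return 0;
    }
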
e                 711 net/netfilter/nfnetlink_queue.c nf_queue_entry_dup(struct nf_queue_entry *e)
e                 713 net/netfilter/nfnetlink_queue.c 	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
e                 842 net/netfilter/nfnetlink_queue.c nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
e                 847 net/netfilter/nfnetlink_queue.c 		if (pskb_trim(e->skb, data_len))
e                 852 net/netfilter/nfnetlink_queue.c 		if (diff > skb_tailroom(e->skb)) {
e                 853 net/netfilter/nfnetlink_queue.c 			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
e                 857 net/netfilter/nfnetlink_queue.c 			kfree_skb(e->skb);
e                 858 net/netfilter/nfnetlink_queue.c 			e->skb = nskb;
e                 860 net/netfilter/nfnetlink_queue.c 		skb_put(e->skb, diff);
e                 862 net/netfilter/nfnetlink_queue.c 	if (skb_ensure_writable(e->skb, data_len))
e                 864 net/netfilter/nfnetlink_queue.c 	skb_copy_to_linear_data(e->skb, data, data_len);
e                 865 net/netfilter/nfnetlink_queue.c 	e->skb->ip_summed = CHECKSUM_NONE;
e                 226 net/netfilter/nft_compat.c 	union nft_entry e = {};
e                 237 net/netfilter/nft_compat.c 	nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
e                 441 net/netfilter/nft_compat.c 	union nft_entry e = {};
e                 452 net/netfilter/nft_compat.c 	nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
e                 538 net/netfilter/nft_compat.c static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
e                 540 net/netfilter/nft_compat.c 	struct nft_xt_match_priv *priv = nft_expr_priv(e);
e                 542 net/netfilter/nft_compat.c 	return __nft_match_dump(skb, e, priv->info);
e                 322 net/netfilter/nft_ct.c 		struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct);
e                 325 net/netfilter/nft_ct.c 		if (e) {
e                 326 net/netfilter/nft_ct.c 			if (e->ctmask != ctmask)
e                 327 net/netfilter/nft_ct.c 				e->ctmask = ctmask;
e                 807 net/netfilter/x_tables.c 	const char *e = base;
e                 815 net/netfilter/x_tables.c 	t = (void *)(e + target_offset);
e                 900 net/netfilter/x_tables.c 	const char *e = base;
e                 909 net/netfilter/x_tables.c 	t = (void *)(e + target_offset);
e                  57 net/netfilter/xt_CT.c 		const struct ipt_entry *e = par->entryinfo;
e                  59 net/netfilter/xt_CT.c 		if (e->ip.invflags & IPT_INV_PROTO)
e                  61 net/netfilter/xt_CT.c 		return e->ip.proto;
e                  63 net/netfilter/xt_CT.c 		const struct ip6t_entry *e = par->entryinfo;
e                  65 net/netfilter/xt_CT.c 		if (e->ipv6.invflags & IP6T_INV_PROTO)
e                  67 net/netfilter/xt_CT.c 		return e->ipv6.proto;
e                 266 net/netfilter/xt_TCPMSS.c 	const struct ipt_entry *e = par->entryinfo;
e                 279 net/netfilter/xt_TCPMSS.c 	xt_ematch_foreach(ematch, e)
e                 290 net/netfilter/xt_TCPMSS.c 	const struct ip6t_entry *e = par->entryinfo;
e                 303 net/netfilter/xt_TCPMSS.c 	xt_ematch_foreach(ematch, e)
e                 262 net/netfilter/xt_l2tp.c 	const struct ipt_entry *e = par->entryinfo;
e                 263 net/netfilter/xt_l2tp.c 	const struct ipt_ip *ip = &e->ip;
e                 289 net/netfilter/xt_l2tp.c 	const struct ip6t_entry *e = par->entryinfo;
e                 290 net/netfilter/xt_l2tp.c 	const struct ip6t_ip6 *ip = &e->ipv6;
e                  32 net/netfilter/xt_multiport.c 	u_int16_t s, e;
e                  39 net/netfilter/xt_multiport.c 			e = minfo->ports[++i];
e                  40 net/netfilter/xt_multiport.c 			pr_debug("src or dst matches with %d-%d?\n", s, e);
e                  44 net/netfilter/xt_multiport.c 				if (src >= s && src <= e)
e                  48 net/netfilter/xt_multiport.c 				if (dst >= s && dst <= e)
e                  52 net/netfilter/xt_multiport.c 				if ((dst >= s && dst <= e) ||
e                  53 net/netfilter/xt_multiport.c 				    (src >= s && src <= e))
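
The xt_multiport loop above consumes consecutive port pairs as inclusive [s, e] ranges and tests source, destination, or either, depending on the match flags. A minimal sketch of the three modes; the enum names are made up for the example, and the real module also mixes single ports with ranges via per-entry flags:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum mode { MATCH_SRC, MATCH_DST, MATCH_EITHER };

    static bool port_range_match(const uint16_t *ports, unsigned int n,
                                 enum mode mode, uint16_t src, uint16_t dst)
    {
        for (unsigned int i = 0; i + 1 < n; i += 2) {
            uint16_t s = ports[i], e = ports[i + 1];

            switch (mode) {
            case MATCH_SRC:
                if (src >= s && src <= e)
                    return true;
                break;
            case MATCH_DST:
                if (dst >= s && dst <= e)
                    return true;
                break;
            case MATCH_EITHER:
                if ((src >= s && src <= e) || (dst >= s && dst <= e))
                    return true;
                break;
            }
        }
        return false;
    }

    int main(void)
    {
        uint16_t ranges[] = { 80, 80, 8000, 8099 };

        printf("%d\n", port_range_match(ranges, 4, MATCH_DST, 50000, 8042));
        return 0;
    }
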
e                  35 net/netfilter/xt_policy.c match_xfrm_state(const struct xfrm_state *x, const struct xt_policy_elem *e,
e                  38 net/netfilter/xt_policy.c #define MATCH_ADDR(x,y,z)	(!e->match.x ||			       \
e                  39 net/netfilter/xt_policy.c 				 (xt_addr_cmp(&e->x, &e->y, (const union nf_inet_addr *)(z), family) \
e                  40 net/netfilter/xt_policy.c 				  ^ e->invert.x))
e                  41 net/netfilter/xt_policy.c #define MATCH(x,y)		(!e->match.x || ((e->x == (y)) ^ e->invert.x))
e                  55 net/netfilter/xt_policy.c 	const struct xt_policy_elem *e;
e                  69 net/netfilter/xt_policy.c 		e = &info->pol[pos];
e                  71 net/netfilter/xt_policy.c 		if (match_xfrm_state(sp->xvec[i], e, family)) {
e                  85 net/netfilter/xt_policy.c 	const struct xt_policy_elem *e;
e                  98 net/netfilter/xt_policy.c 		e = &info->pol[pos];
e                 100 net/netfilter/xt_policy.c 		if (match_xfrm_state(dst->xfrm, e, family)) {
e                 128 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 136 net/netfilter/xt_recent.c 	list_for_each_entry(e, &table->iphash[h], list)
e                 137 net/netfilter/xt_recent.c 		if (e->family == family &&
e                 138 net/netfilter/xt_recent.c 		    memcmp(&e->addr, addrp, sizeof(e->addr)) == 0 &&
e                 139 net/netfilter/xt_recent.c 		    (ttl == e->ttl || ttl == 0 || e->ttl == 0))
e                 140 net/netfilter/xt_recent.c 			return e;
e                 144 net/netfilter/xt_recent.c static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
e                 146 net/netfilter/xt_recent.c 	list_del(&e->list);
e                 147 net/netfilter/xt_recent.c 	list_del(&e->lru_list);
e                 148 net/netfilter/xt_recent.c 	kfree(e);
e                 157 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 162 net/netfilter/xt_recent.c 	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
e                 167 net/netfilter/xt_recent.c 	if (time_after(time, e->stamps[e->index-1]))
e                 168 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
e                 175 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 179 net/netfilter/xt_recent.c 		e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
e                 180 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
e                 184 net/netfilter/xt_recent.c 	e = kmalloc(struct_size(e, stamps, nstamps_max), GFP_ATOMIC);
e                 185 net/netfilter/xt_recent.c 	if (e == NULL)
e                 187 net/netfilter/xt_recent.c 	memcpy(&e->addr, addr, sizeof(e->addr));
e                 188 net/netfilter/xt_recent.c 	e->ttl       = ttl;
e                 189 net/netfilter/xt_recent.c 	e->stamps[0] = jiffies;
e                 190 net/netfilter/xt_recent.c 	e->nstamps   = 1;
e                 191 net/netfilter/xt_recent.c 	e->index     = 1;
e                 192 net/netfilter/xt_recent.c 	e->family    = family;
e                 194 net/netfilter/xt_recent.c 		list_add_tail(&e->list, &t->iphash[recent_entry_hash4(addr)]);
e                 196 net/netfilter/xt_recent.c 		list_add_tail(&e->list, &t->iphash[recent_entry_hash6(addr)]);
e                 197 net/netfilter/xt_recent.c 	list_add_tail(&e->lru_list, &t->lru_list);
e                 199 net/netfilter/xt_recent.c 	return e;
e                 202 net/netfilter/xt_recent.c static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
e                 204 net/netfilter/xt_recent.c 	e->index &= t->nstamps_max_mask;
e                 205 net/netfilter/xt_recent.c 	e->stamps[e->index++] = jiffies;
e                 206 net/netfilter/xt_recent.c 	if (e->index > e->nstamps)
e                 207 net/netfilter/xt_recent.c 		e->nstamps = e->index;
e                 208 net/netfilter/xt_recent.c 	list_move_tail(&e->lru_list, &t->lru_list);
e                 224 net/netfilter/xt_recent.c 	struct recent_entry *e, *next;
e                 228 net/netfilter/xt_recent.c 		list_for_each_entry_safe(e, next, &t->iphash[i], list)
e                 229 net/netfilter/xt_recent.c 			recent_entry_remove(t, e);
e                 239 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 274 net/netfilter/xt_recent.c 	e = recent_entry_lookup(t, &addr_mask, xt_family(par),
e                 276 net/netfilter/xt_recent.c 	if (e == NULL) {
e                 279 net/netfilter/xt_recent.c 		e = recent_entry_init(t, &addr_mask, xt_family(par), ttl);
e                 280 net/netfilter/xt_recent.c 		if (e == NULL)
e                 289 net/netfilter/xt_recent.c 		recent_entry_remove(t, e);
e                 295 net/netfilter/xt_recent.c 		for (i = 0; i < e->nstamps; i++) {
e                 296 net/netfilter/xt_recent.c 			if (info->seconds && time_after(time, e->stamps[i]))
e                 311 net/netfilter/xt_recent.c 		recent_entry_update(t, e);
e                 312 net/netfilter/xt_recent.c 		e->ttl = ttl;
e                 476 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 482 net/netfilter/xt_recent.c 		list_for_each_entry(e, &t->iphash[st->bucket], list)
e                 484 net/netfilter/xt_recent.c 				return e;
e                 492 net/netfilter/xt_recent.c 	const struct recent_entry *e = v;
e                 493 net/netfilter/xt_recent.c 	const struct list_head *head = e->list.next;
e                 512 net/netfilter/xt_recent.c 	const struct recent_entry *e = v;
e                 517 net/netfilter/xt_recent.c 	i = (e->index - 1) & t->nstamps_max_mask;
e                 519 net/netfilter/xt_recent.c 	if (e->family == NFPROTO_IPV4)
e                 521 net/netfilter/xt_recent.c 			   &e->addr.ip, e->ttl, e->stamps[i], e->index);
e                 524 net/netfilter/xt_recent.c 			   &e->addr.in6, e->ttl, e->stamps[i], e->index);
e                 525 net/netfilter/xt_recent.c 	for (i = 0; i < e->nstamps; i++)
e                 526 net/netfilter/xt_recent.c 		seq_printf(seq, "%s %lu", i ? "," : "", e->stamps[i]);
e                 555 net/netfilter/xt_recent.c 	struct recent_entry *e;
e                 603 net/netfilter/xt_recent.c 	e = recent_entry_lookup(t, &addr, family, 0);
e                 604 net/netfilter/xt_recent.c 	if (e == NULL) {
e                 609 net/netfilter/xt_recent.c 			recent_entry_update(t, e);
e                 611 net/netfilter/xt_recent.c 			recent_entry_remove(t, e);
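
recent_entry_update() above keeps a power-of-two ring of hit timestamps per address: mask the cursor, store, post-increment, and grow nstamps until the ring is full. A userspace sketch with time() standing in for jiffies and the flexible array sized up front; the horizon check mirrors the match loop's staleness test in spirit:

    #include <stdio.h>
    #include <time.h>

    #define NSTAMPS_MAX 8   /* must be a power of two */

    struct entry {
        unsigned int index, nstamps;
        time_t stamps[NSTAMPS_MAX];
    };

    static void entry_update(struct entry *e)
    {
        e->index &= NSTAMPS_MAX - 1;        /* wrap the write cursor */
        e->stamps[e->index++] = time(NULL);
        if (e->index > e->nstamps)          /* grow until ring is full */
            e->nstamps = e->index;
    }

    /* count hits newer than "horizon" seconds */
    static unsigned int entry_hits(const struct entry *e, time_t horizon)
    {
        time_t cutoff = time(NULL) - horizon;
        unsigned int hits = 0;

        for (unsigned int i = 0; i < e->nstamps; i++)
            if (e->stamps[i] >= cutoff)
                hits++;
        return hits;
    }

    int main(void)
    {
        struct entry e = { 0, 0, { 0 } };

        for (int i = 0; i < 12; i++)        /* 12 hits into 8 slots */
            entry_update(&e);
        printf("%u hits in last 60s\n", entry_hits(&e, 60));
        return 0;
    }
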
e                 376 net/sched/act_ife.c 	struct tcf_meta_info *e;
e                 389 net/sched/act_ife.c 	list_for_each_entry(e, &ife->metalist, metalist) {
e                 390 net/sched/act_ife.c 		if (!e->ops->get(skb, e))
e                 410 net/sched/act_ife.c 	struct tcf_meta_info *e, *n;
e                 412 net/sched/act_ife.c 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
e                 413 net/sched/act_ife.c 		list_del(&e->metalist);
e                 414 net/sched/act_ife.c 		if (e->metaval) {
e                 415 net/sched/act_ife.c 			if (e->ops->release)
e                 416 net/sched/act_ife.c 				e->ops->release(e);
e                 418 net/sched/act_ife.c 				kfree(e->metaval);
e                 420 net/sched/act_ife.c 		module_put(e->ops->owner);
e                 421 net/sched/act_ife.c 		kfree(e);
e                 674 net/sched/act_ife.c 	struct tcf_meta_info *e;
e                 677 net/sched/act_ife.c 	list_for_each_entry(e, &ife->metalist, metalist) {
e                 678 net/sched/act_ife.c 		if (metaid == e->metaid) {
e                 679 net/sched/act_ife.c 			if (e->ops) {
e                 681 net/sched/act_ife.c 				return e->ops->decode(skb, mdata, mlen);
e                 749 net/sched/act_ife.c 	struct tcf_meta_info *e, *n;
e                 752 net/sched/act_ife.c 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
e                 753 net/sched/act_ife.c 		if (e->ops->check_presence) {
e                 754 net/sched/act_ife.c 			run_sz = e->ops->check_presence(skb, e);
e                 768 net/sched/act_ife.c 	struct tcf_meta_info *e;
e                 814 net/sched/act_ife.c 	list_for_each_entry(e, &ife->metalist, metalist) {
e                 815 net/sched/act_ife.c 		if (e->ops->encode) {
e                 816 net/sched/act_ife.c 			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
e                 817 net/sched/act_ife.c 					     e);
e                  38 net/sched/act_ipt.c 	struct ipt_entry e = {};
e                  50 net/sched/act_ipt.c 	par.entryinfo = &e;
e                  22 net/sched/act_meta_mark.c 			  struct tcf_meta_info *e)
e                  26 net/sched/act_meta_mark.c 	return ife_encode_meta_u32(ifemark, skbdata, e);
e                  37 net/sched/act_meta_mark.c static int skbmark_check(struct sk_buff *skb, struct tcf_meta_info *e)
e                  39 net/sched/act_meta_mark.c 	return ife_check_meta_u32(skb->mark, e);
e                  21 net/sched/act_meta_skbprio.c static int skbprio_check(struct sk_buff *skb, struct tcf_meta_info *e)
e                  23 net/sched/act_meta_skbprio.c 	return ife_check_meta_u32(skb->priority, e);
e                  27 net/sched/act_meta_skbprio.c 			  struct tcf_meta_info *e)
e                  31 net/sched/act_meta_skbprio.c 	return ife_encode_meta_u32(ifeprio, skbdata, e);
e                  22 net/sched/act_meta_skbtcindex.c 			     struct tcf_meta_info *e)
e                  26 net/sched/act_meta_skbtcindex.c 	return ife_encode_meta_u16(ifetc_index, skbdata, e);
e                  37 net/sched/act_meta_skbtcindex.c static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
e                  39 net/sched/act_meta_skbtcindex.c 	return ife_check_meta_u16(skb->tc_index, e);
e                 486 net/sched/cls_rsvp.h 	struct tcf_exts e;
e                 499 net/sched/cls_rsvp.h 	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
e                 502 net/sched/cls_rsvp.h 	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
e                 533 net/sched/cls_rsvp.h 		tcf_exts_change(&n->exts, &e);
e                 606 net/sched/cls_rsvp.h 			tcf_exts_change(&f->exts, &e);
e                 652 net/sched/cls_rsvp.h 	tcf_exts_destroy(&e);
e                 337 net/sched/cls_tcindex.c 	struct tcf_exts e;
e                 339 net/sched/cls_tcindex.c 	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
e                 342 net/sched/cls_tcindex.c 	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
e                 484 net/sched/cls_tcindex.c 	tcf_exts_change(&r->exts, &e);
e                 518 net/sched/cls_tcindex.c 	tcf_exts_destroy(&e);
e                  48 net/sched/em_ipt.c 	} e = {};
e                  55 net/sched/em_ipt.c 	mtpar.entryinfo = &e;
e                  94 net/sched/ematch.c 	struct tcf_ematch_ops *e = NULL;
e                  97 net/sched/ematch.c 	list_for_each_entry(e, &ematch_ops, link) {
e                  98 net/sched/ematch.c 		if (kind == e->kind) {
e                  99 net/sched/ematch.c 			if (!try_module_get(e->owner))
e                 100 net/sched/ematch.c 				e = NULL;
e                 102 net/sched/ematch.c 			return e;
e                 125 net/sched/ematch.c 	struct tcf_ematch_ops *e;
e                 131 net/sched/ematch.c 	list_for_each_entry(e, &ematch_ops, link)
e                 132 net/sched/ematch.c 		if (ops->kind == e->kind)
e                2234 net/sched/sch_api.c 	struct proc_dir_entry *e;
e                2236 net/sched/sch_api.c 	e = proc_create_single("psched", 0, net->proc_net, psched_show);
e                2237 net/sched/sch_api.c 	if (e == NULL)
e                1193 net/sched/sch_taprio.c 		struct tc_taprio_sched_entry *e = &offload->entries[i];
e                1195 net/sched/sch_taprio.c 		e->command = entry->command;
e                1196 net/sched/sch_taprio.c 		e->interval = entry->interval;
e                1197 net/sched/sch_taprio.c 		e->gate_mask = entry->gate_mask;
e                 236 net/tipc/name_distr.c 	struct distr_queue_item *e, *tmp;
e                 239 net/tipc/name_distr.c 	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
e                 240 net/tipc/name_distr.c 		if (e->node != addr)
e                 242 net/tipc/name_distr.c 		list_del(&e->next);
e                 243 net/tipc/name_distr.c 		kfree(e);
e                1900 net/tipc/node.c 	struct tipc_link_entry *e;
e                1909 net/tipc/node.c 		e = &n->links[bearer_id];
e                1910 net/tipc/node.c 		if (e->link) {
e                1912 net/tipc/node.c 				tipc_link_set_tolerance(e->link, b->tolerance,
e                1915 net/tipc/node.c 				tipc_link_set_mtu(e->link, b->mtu);
e                1918 net/tipc/node.c 		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
e                 128 net/tipc/topsrv.c 	struct outqueue_entry *e, *safe;
e                 138 net/tipc/topsrv.c 	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
e                 139 net/tipc/topsrv.c 		list_del(&e->list);
e                 140 net/tipc/topsrv.c 		kfree(e);
e                 251 net/tipc/topsrv.c 	struct outqueue_entry *e;
e                 261 net/tipc/topsrv.c 		e = list_first_entry(queue, struct outqueue_entry, list);
e                 262 net/tipc/topsrv.c 		evt = &e->evt;
e                 265 net/tipc/topsrv.c 		if (e->inactive)
e                 293 net/tipc/topsrv.c 		list_del(&e->list);
e                 294 net/tipc/topsrv.c 		kfree(e);
e                 316 net/tipc/topsrv.c 	struct outqueue_entry *e;
e                 326 net/tipc/topsrv.c 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
e                 327 net/tipc/topsrv.c 	if (!e)
e                 329 net/tipc/topsrv.c 	e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
e                 330 net/tipc/topsrv.c 	memcpy(&e->evt, evt, sizeof(*evt));
e                 332 net/tipc/topsrv.c 	list_add_tail(&e->list, &con->outqueue);
e                 386 net/wireless/ibss.c 		freq->e = 6;
e                2323 net/wireless/scan.c 	iwe.u.freq.e = 0;
e                2332 net/wireless/scan.c 	iwe.u.freq.e = 6;
e                 201 net/wireless/wext-compat.c 				range->freq[c].e = 6;
e                 234 net/wireless/wext-compat.c 	if (freq->e == 0) {
e                 243 net/wireless/wext-compat.c 		for (i = 0; i < freq->e; i++)
e                 817 net/wireless/wext-compat.c 		freq->e = 6;
e                 136 net/wireless/wext-sme.c 		freq->e = 6;
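
The wext lines above fill struct iw_freq, which encodes a value as m * 10^e: drivers store MHz with e = 6 so the value decodes to Hz, while e == 0 with a small m is read back as a channel number. A decoding sketch; the struct here is a stand-in for the real struct iw_freq, and the channel cutoff is an assumption for the example:

    #include <stdio.h>

    struct iw_freq_like {   /* stand-in for struct iw_freq */
        int m;              /* mantissa */
        short e;            /* exponent, base 10 */
    };

    static double freq_hz(const struct iw_freq_like *f)
    {
        double v = f->m;

        for (int i = 0; i < f->e; i++)
            v *= 10.0;
        return v;
    }

    int main(void)
    {
        struct iw_freq_like f = { .m = 2412, .e = 6 };  /* 2.4 GHz ch 1 */

        if (f.e == 0 && f.m <= 1000)
            printf("channel %d\n", f.m);
        else
            printf("%.0f Hz\n", freq_hz(&f));
        return 0;
    }
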
e                  40 samples/bpf/trace_output_user.c 	} *e = data;
e                  42 samples/bpf/trace_output_user.c 	if (e->cookie != 0x12345678) {
e                  44 samples/bpf/trace_output_user.c 		       e->pid, e->cookie, size);
e                  81 samples/bpf/xdp_sample_pkts_user.c 	} __packed *e = data;
e                  84 samples/bpf/xdp_sample_pkts_user.c 	if (e->cookie != 0xdead) {
e                  85 samples/bpf/xdp_sample_pkts_user.c 		printf("BUG cookie %x sized %d\n", e->cookie, size);
e                  89 samples/bpf/xdp_sample_pkts_user.c 	printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
e                  90 samples/bpf/xdp_sample_pkts_user.c 	for (i = 0; i < 14 && i < e->pkt_len; i++)
e                  91 samples/bpf/xdp_sample_pkts_user.c 		printf("%02x ", e->pkt_data[i]);
e                 837 scripts/asn1_compiler.c 	struct element *e = calloc(1, sizeof(*e));
e                 838 scripts/asn1_compiler.c 	if (!e) {
e                 842 scripts/asn1_compiler.c 	e->list_next = element_list;
e                 843 scripts/asn1_compiler.c 	element_list = e;
e                 844 scripts/asn1_compiler.c 	return e;
e                1210 scripts/asn1_compiler.c static void dump_element(const struct element *e, int level)
e                1213 scripts/asn1_compiler.c 	const struct type *t = e->type_def;
e                1214 scripts/asn1_compiler.c 	const char *name = e->name ? e->name->content : ".";
e                1218 scripts/asn1_compiler.c 	if (e->class == 0 && e->method == 0 && e->tag == 0)
e                1220 scripts/asn1_compiler.c 	else if (e->class == ASN1_UNIV)
e                1222 scripts/asn1_compiler.c 			asn1_classes[e->class],
e                1223 scripts/asn1_compiler.c 			asn1_methods[e->method],
e                1224 scripts/asn1_compiler.c 			asn1_universal_tags[e->tag]);
e                1227 scripts/asn1_compiler.c 			asn1_classes[e->class],
e                1228 scripts/asn1_compiler.c 			asn1_methods[e->method],
e                1229 scripts/asn1_compiler.c 			e->tag);
e                1232 scripts/asn1_compiler.c 	       e->flags & ELEMENT_IMPLICIT ? 'I' : '-',
e                1233 scripts/asn1_compiler.c 	       e->flags & ELEMENT_EXPLICIT ? 'E' : '-',
e                1234 scripts/asn1_compiler.c 	       e->flags & ELEMENT_TAG_SPECIFIED ? 'T' : '-',
e                1235 scripts/asn1_compiler.c 	       e->flags & ELEMENT_SKIPPABLE ? 'S' : '-',
e                1236 scripts/asn1_compiler.c 	       e->flags & ELEMENT_CONDITIONAL ? 'C' : '-',
e                1237 scripts/asn1_compiler.c 	       "-tTqQcaro"[e->compound],
e                1242 scripts/asn1_compiler.c 	       e->action ? e->action->name : "");
e                1243 scripts/asn1_compiler.c 	if (e->compound == TYPE_REF)
e                1244 scripts/asn1_compiler.c 		dump_element(e->type->type->element, level + 3);
e                1246 scripts/asn1_compiler.c 		for (c = e->children; c; c = c->next)
e                1256 scripts/asn1_compiler.c static void render_element(FILE *out, struct element *e, struct element *tag);
e                1294 scripts/asn1_compiler.c 	struct element *e;
e                1364 scripts/asn1_compiler.c 	for (e = element_list; e; e = e->list_next)
e                1365 scripts/asn1_compiler.c 		e->flags &= ~ELEMENT_RENDERED;
e                1394 scripts/asn1_compiler.c 	struct element *e, *ce;
e                1398 scripts/asn1_compiler.c 	while ((e = render_list)) {
e                1399 scripts/asn1_compiler.c 		render_list = e->render_next;
e                1404 scripts/asn1_compiler.c 		e->entry_index = entry = nr_entries;
e                1406 scripts/asn1_compiler.c 		for (ce = e->children; ce; ce = ce->next)
e                1410 scripts/asn1_compiler.c 		act = e->action ? "_ACT" : "";
e                1411 scripts/asn1_compiler.c 		switch (e->compound) {
e                1429 scripts/asn1_compiler.c 		if (e->action)
e                1431 scripts/asn1_compiler.c 				      e->action->name);
e                1439 scripts/asn1_compiler.c static void render_element(FILE *out, struct element *e, struct element *tag)
e                1445 scripts/asn1_compiler.c 	if (e->flags & ELEMENT_SKIPPABLE ||
e                1449 scripts/asn1_compiler.c 	if ((e->type_def && e->type_def->ref_count > 1) ||
e                1453 scripts/asn1_compiler.c 	if (e->type_def && out) {
e                1454 scripts/asn1_compiler.c 		render_more(out, "\t// %s\n", e->type_def->name->content);
e                1458 scripts/asn1_compiler.c 	cond = (e->flags & ELEMENT_CONDITIONAL ||
e                1460 scripts/asn1_compiler.c 	act = e->action ? "_ACT" : "";
e                1461 scripts/asn1_compiler.c 	switch (e->compound) {
e                1465 scripts/asn1_compiler.c 		if (e->name)
e                1466 scripts/asn1_compiler.c 			render_more(out, "\t\t// %s", e->name->content);
e                1471 scripts/asn1_compiler.c 		render_element(out, e->children, e);
e                1488 scripts/asn1_compiler.c 		if (e->class == ASN1_UNIV && e->method == ASN1_PRIM && e->tag == 0)
e                1497 scripts/asn1_compiler.c 	x = tag ?: e;
e                1504 scripts/asn1_compiler.c 		tag = e;
e                1512 scripts/asn1_compiler.c 			      asn1_methods[tag->method | e->method],
e                1517 scripts/asn1_compiler.c 			      asn1_methods[tag->method | e->method],
e                1523 scripts/asn1_compiler.c 	switch (e->compound) {
e                1525 scripts/asn1_compiler.c 		render_element(out, e->type->type->element, tag);
e                1526 scripts/asn1_compiler.c 		if (e->action)
e                1535 scripts/asn1_compiler.c 			render_opcode(out, "_jump_target(%u),", e->entry_index);
e                1536 scripts/asn1_compiler.c 			if (e->type_def && e->type_def->name)
e                1538 scripts/asn1_compiler.c 					    e->type_def->name->content);
e                1540 scripts/asn1_compiler.c 			if (!(e->flags & ELEMENT_RENDERED)) {
e                1541 scripts/asn1_compiler.c 				e->flags |= ELEMENT_RENDERED;
e                1542 scripts/asn1_compiler.c 				*render_list_p = e;
e                1543 scripts/asn1_compiler.c 				render_list_p = &e->render_next;
e                1549 scripts/asn1_compiler.c 			for (ec = e->children; ec; ec = ec->next)
e                1561 scripts/asn1_compiler.c 			render_opcode(out, "_jump_target(%u),", e->entry_index);
e                1562 scripts/asn1_compiler.c 			if (e->type_def && e->type_def->name)
e                1564 scripts/asn1_compiler.c 					    e->type_def->name->content);
e                1566 scripts/asn1_compiler.c 			if (!(e->flags & ELEMENT_RENDERED)) {
e                1567 scripts/asn1_compiler.c 				e->flags |= ELEMENT_RENDERED;
e                1568 scripts/asn1_compiler.c 				*render_list_p = e;
e                1569 scripts/asn1_compiler.c 				render_list_p = &e->render_next;
e                1576 scripts/asn1_compiler.c 			render_element(out, e->children, NULL);
e                1578 scripts/asn1_compiler.c 			if (e->compound == SEQUENCE_OF)
e                1597 scripts/asn1_compiler.c 		for (ec = e->children; ec; ec = ec->next)
e                1601 scripts/asn1_compiler.c 		if (e->action)
e                1609 scripts/asn1_compiler.c 	if (e->action)
e                1610 scripts/asn1_compiler.c 		render_opcode(out, "_action(ACT_%s),\n", e->action->name);
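
render_element() defers shared type definitions through a tail-pointer work queue: an element is appended at most once (guarded by ELEMENT_RENDERED) and the consumer pops from the head, so recursion over type references becomes a flat pass. A sketch of that queue discipline; the explicit tail reset when the queue empties is added here for standalone correctness:

struct work {
	struct work *render_next;
	unsigned int flags;
};
#define RENDERED 0x1

static struct work *render_list;
static struct work **render_list_p = &render_list;

static void enqueue_once(struct work *e)
{
	if (e->flags & RENDERED)
		return;			/* render each definition once */
	e->flags |= RENDERED;
	e->render_next = NULL;
	*render_list_p = e;		/* append at the tail */
	render_list_p = &e->render_next;
}

static void drain(void (*render_one)(struct work *))
{
	struct work *e;

	while ((e = render_list)) {
		render_list = e->render_next;
		if (!render_list)	/* queue empty: reset the tail */
			render_list_p = &render_list;
		render_one(e);		/* may enqueue_once() more work */
	}
}
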
e                  45 scripts/dtc/flattree.c static void bin_emit_cell(void *e, cell_t val)
e                  47 scripts/dtc/flattree.c 	struct data *dtbuf = e;
e                  52 scripts/dtc/flattree.c static void bin_emit_string(void *e, const char *str, int len)
e                  54 scripts/dtc/flattree.c 	struct data *dtbuf = e;
e                  63 scripts/dtc/flattree.c static void bin_emit_align(void *e, int a)
e                  65 scripts/dtc/flattree.c 	struct data *dtbuf = e;
e                  70 scripts/dtc/flattree.c static void bin_emit_data(void *e, struct data d)
e                  72 scripts/dtc/flattree.c 	struct data *dtbuf = e;
e                  77 scripts/dtc/flattree.c static void bin_emit_beginnode(void *e, struct label *labels)
e                  79 scripts/dtc/flattree.c 	bin_emit_cell(e, FDT_BEGIN_NODE);
e                  82 scripts/dtc/flattree.c static void bin_emit_endnode(void *e, struct label *labels)
e                  84 scripts/dtc/flattree.c 	bin_emit_cell(e, FDT_END_NODE);
e                  87 scripts/dtc/flattree.c static void bin_emit_property(void *e, struct label *labels)
e                  89 scripts/dtc/flattree.c 	bin_emit_cell(e, FDT_PROP);
e                 123 scripts/dtc/flattree.c static void asm_emit_cell(void *e, cell_t val)
e                 125 scripts/dtc/flattree.c 	FILE *f = e;
e                 132 scripts/dtc/flattree.c static void asm_emit_string(void *e, const char *str, int len)
e                 134 scripts/dtc/flattree.c 	FILE *f = e;
e                 142 scripts/dtc/flattree.c static void asm_emit_align(void *e, int a)
e                 144 scripts/dtc/flattree.c 	FILE *f = e;
e                 149 scripts/dtc/flattree.c static void asm_emit_data(void *e, struct data d)
e                 151 scripts/dtc/flattree.c 	FILE *f = e;
e                 159 scripts/dtc/flattree.c 		asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
e                 171 scripts/dtc/flattree.c static void asm_emit_beginnode(void *e, struct label *labels)
e                 173 scripts/dtc/flattree.c 	FILE *f = e;
e                 181 scripts/dtc/flattree.c 	asm_emit_cell(e, FDT_BEGIN_NODE);
e                 184 scripts/dtc/flattree.c static void asm_emit_endnode(void *e, struct label *labels)
e                 186 scripts/dtc/flattree.c 	FILE *f = e;
e                 190 scripts/dtc/flattree.c 	asm_emit_cell(e, FDT_END_NODE);
e                 197 scripts/dtc/flattree.c static void asm_emit_property(void *e, struct label *labels)
e                 199 scripts/dtc/flattree.c 	FILE *f = e;
e                 207 scripts/dtc/flattree.c 	asm_emit_cell(e, FDT_PROP);
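
flattree.c emits the same device tree through two interchangeable backends; every callback takes an opaque void *e that the binary emitter treats as a struct data buffer and the assembler emitter as a FILE *. The dispatch shape, reduced to two hooks (a hypothetical emitter struct; dtc's real table carries more):

#include <stdio.h>
#include <stdint.h>

struct emitter {
	void (*cell)(void *e, uint32_t val);
	void (*string)(void *e, const char *str, int len);
};

static void asm_cell(void *e, uint32_t val)
{
	FILE *f = e;			/* context is the output stream */

	fprintf(f, "\t.long\t0x%x\n", val);
}

static void asm_string(void *e, const char *str, int len)
{
	FILE *f = e;

	fprintf(f, "\t.string\t\"%.*s\"\n", len, str);
}

static const struct emitter asm_emitter = {
	.cell	= asm_cell,
	.string	= asm_string,
};

static void flatten(const struct emitter *ops, void *e)
{
	ops->cell(e, 0x1);		/* e.g. FDT_BEGIN_NODE */
	ops->string(e, "node", 4);
}
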
e                 698 scripts/dtc/libfdt/fdt_overlay.c 	const char *s, *e;
e                 734 scripts/dtc/libfdt/fdt_overlay.c 		e = path + path_len;
e                 751 scripts/dtc/libfdt/fdt_overlay.c 		if ((e - s) < len || memcmp(s, "/__overlay__/", len))
e                 755 scripts/dtc/libfdt/fdt_overlay.c 		rel_path_len = e - rel_path;
e                  40 scripts/extract-cert.c 	int e, line;
e                  46 scripts/extract-cert.c 	while ((e = ERR_get_error_line(&file, &line))) {
e                  47 scripts/extract-cert.c 		ERR_error_string(e, buf);
e                 115 scripts/extract-cert.c 		ENGINE *e;
e                 126 scripts/extract-cert.c 		e = ENGINE_by_id("pkcs11");
e                 127 scripts/extract-cert.c 		ERR(!e, "Load PKCS#11 ENGINE");
e                 128 scripts/extract-cert.c 		if (ENGINE_init(e))
e                 133 scripts/extract-cert.c 			ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
e                 134 scripts/extract-cert.c 		ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
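
extract-cert.c (and sign-file.c below) drain OpenSSL's per-thread error queue in a loop, pairing each error code with the file and line that raised it. The loop, standalone (ERR_error_string() requires a buffer of at least 256 bytes; link with -lcrypto):

#include <stdio.h>
#include <openssl/err.h>

static void display_openssl_errors(void)
{
	unsigned long e;
	const char *file;
	int line;
	char buf[256];		/* ERR_error_string() needs >= 256 bytes */

	while ((e = ERR_get_error_line(&file, &line))) {
		ERR_error_string(e, buf);
		fprintf(stderr, "%s:%d: %s\n", file, line, buf);
	}
}
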
e                 414 scripts/gcc-plugins/latent_entropy_plugin.c 	edge e, last_bb_e;
e                 420 scripts/gcc-plugins/latent_entropy_plugin.c 	FOR_EACH_EDGE(e, ei, last_bb_e->src->preds) {
e                 421 scripts/gcc-plugins/latent_entropy_plugin.c 		if (ENTRY_BLOCK_PTR_FOR_FN(cfun) == e->src)
e                 423 scripts/gcc-plugins/latent_entropy_plugin.c 		if (EXIT_BLOCK_PTR_FOR_FN(cfun) == e->src)
e                 426 scripts/gcc-plugins/latent_entropy_plugin.c 		handle_tail_calls(e->src, local_entropy);
e                 146 scripts/gcc-plugins/randomize_layout_plugin.c 	u64 e = x->a - rot(x->b, 7);
e                 149 scripts/gcc-plugins/randomize_layout_plugin.c 	x->c = x->d + e;
e                 150 scripts/gcc-plugins/randomize_layout_plugin.c 	x->d = e + x->a;
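
These two randomize_layout lines are the middle of Bob Jenkins' small noncryptographic PRNG, which the plugin uses to derive a deterministic struct-shuffle order from its seed. The full round, for context:

#include <stdint.h>

#define rot(x, k) (((x) << (k)) | ((x) >> (64 - (k))))

typedef struct { uint64_t a, b, c, d; } ranctx;

static uint64_t ranval(ranctx *x)
{
	uint64_t e = x->a - rot(x->b, 7);

	x->a = x->b ^ rot(x->c, 13);
	x->b = x->c + rot(x->d, 37);
	x->c = x->d + e;	/* the two lines quoted above */
	x->d = e + x->a;
	return x->d;
}
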
e                 329 scripts/genksyms/genksyms.c void free_list(struct string_list *s, struct string_list *e)
e                 331 scripts/genksyms/genksyms.c 	while (s != e) {
e                 503 scripts/genksyms/genksyms.c 	struct string_list **e, **b;
e                 516 scripts/genksyms/genksyms.c 	b = alloca(elem * sizeof(*e));
e                 517 scripts/genksyms/genksyms.c 	e = b + elem;
e                 518 scripts/genksyms/genksyms.c 	tmp2 = e - 1;
e                 524 scripts/genksyms/genksyms.c 	while (b != e) {
e                 533 scripts/genksyms/genksyms.c 	struct string_list **e, **b;
e                 544 scripts/genksyms/genksyms.c 	b = alloca(elem * sizeof(*e));
e                 545 scripts/genksyms/genksyms.c 	e = b + elem;
e                 546 scripts/genksyms/genksyms.c 	tmp2 = e - 1;
e                 552 scripts/genksyms/genksyms.c 	while (b != e) {
e                  59 scripts/genksyms/genksyms.h void free_list(struct string_list *s, struct string_list *e);
e                  37 scripts/genksyms/parse.y   struct string_list *b = *pb, *e = *pe;
e                  38 scripts/genksyms/parse.y   *pb = e;
e                  39 scripts/genksyms/parse.y   free_list(b, e);
e                 500 scripts/genksyms/parse.y yyerror(const char *e)
e                 502 scripts/genksyms/parse.y   error_with_pos("%s", e);
e                 268 scripts/insert-sys-cert.c static void print_usage(char *e)
e                 270 scripts/insert-sys-cert.c 	printf("Usage %s [-s <System.map>] -b <vmlinux> -c <certfile>\n", e);
e                1174 scripts/kconfig/confdata.c 	struct expr *e;
e                1189 scripts/kconfig/confdata.c 	expr_list_for_each_sym(prop->expr, e, sym)
e                1199 scripts/kconfig/confdata.c 	expr_list_for_each_sym(prop->expr, e, sym) {
e                1222 scripts/kconfig/confdata.c 	struct expr *e;
e                1229 scripts/kconfig/confdata.c 	expr_list_for_each_sym(prop->expr, e, sym) {
e                  17 scripts/kconfig/expr.c static struct expr *expr_eliminate_yn(struct expr *e);
e                  21 scripts/kconfig/expr.c 	struct expr *e = xcalloc(1, sizeof(*e));
e                  22 scripts/kconfig/expr.c 	e->type = E_SYMBOL;
e                  23 scripts/kconfig/expr.c 	e->left.sym = sym;
e                  24 scripts/kconfig/expr.c 	return e;
e                  29 scripts/kconfig/expr.c 	struct expr *e = xcalloc(1, sizeof(*e));
e                  30 scripts/kconfig/expr.c 	e->type = type;
e                  31 scripts/kconfig/expr.c 	e->left.expr = ce;
e                  32 scripts/kconfig/expr.c 	return e;
e                  37 scripts/kconfig/expr.c 	struct expr *e = xcalloc(1, sizeof(*e));
e                  38 scripts/kconfig/expr.c 	e->type = type;
e                  39 scripts/kconfig/expr.c 	e->left.expr = e1;
e                  40 scripts/kconfig/expr.c 	e->right.expr = e2;
e                  41 scripts/kconfig/expr.c 	return e;
e                  46 scripts/kconfig/expr.c 	struct expr *e = xcalloc(1, sizeof(*e));
e                  47 scripts/kconfig/expr.c 	e->type = type;
e                  48 scripts/kconfig/expr.c 	e->left.sym = s1;
e                  49 scripts/kconfig/expr.c 	e->right.sym = s2;
e                  50 scripts/kconfig/expr.c 	return e;
e                  69 scripts/kconfig/expr.c 	struct expr *e;
e                  74 scripts/kconfig/expr.c 	e = xmalloc(sizeof(*org));
e                  75 scripts/kconfig/expr.c 	memcpy(e, org, sizeof(*org));
e                  78 scripts/kconfig/expr.c 		e->left = org->left;
e                  81 scripts/kconfig/expr.c 		e->left.expr = expr_copy(org->left.expr);
e                  89 scripts/kconfig/expr.c 		e->left.sym = org->left.sym;
e                  90 scripts/kconfig/expr.c 		e->right.sym = org->right.sym;
e                  95 scripts/kconfig/expr.c 		e->left.expr = expr_copy(org->left.expr);
e                  96 scripts/kconfig/expr.c 		e->right.expr = expr_copy(org->right.expr);
e                  99 scripts/kconfig/expr.c 		fprintf(stderr, "can't copy type %d\n", e->type);
e                 100 scripts/kconfig/expr.c 		free(e);
e                 101 scripts/kconfig/expr.c 		e = NULL;
e                 105 scripts/kconfig/expr.c 	return e;
e                 108 scripts/kconfig/expr.c void expr_free(struct expr *e)
e                 110 scripts/kconfig/expr.c 	if (!e)
e                 113 scripts/kconfig/expr.c 	switch (e->type) {
e                 117 scripts/kconfig/expr.c 		expr_free(e->left.expr);
e                 128 scripts/kconfig/expr.c 		expr_free(e->left.expr);
e                 129 scripts/kconfig/expr.c 		expr_free(e->right.expr);
e                 132 scripts/kconfig/expr.c 		fprintf(stderr, "how to free type %d?\n", e->type);
e                 135 scripts/kconfig/expr.c 	free(e);
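
Usage sketch for the allocators above: kconfig builds dependency expressions bottom-up from symbol leaves, and expr_free() releases the whole tree recursively. A fragment against expr.h's real API (symbols assumed already looked up):

/* "FOO && !BAR" as an expression tree, using the constructors above */
static struct expr *foo_and_not_bar(struct symbol *foo, struct symbol *bar)
{
	struct expr *l = expr_alloc_symbol(foo);
	struct expr *r = expr_alloc_one(E_NOT, expr_alloc_symbol(bar));

	return expr_alloc_two(E_AND, l, r);	/* release with expr_free() */
}
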
e                 317 scripts/kconfig/expr.c static struct expr *expr_eliminate_yn(struct expr *e)
e                 321 scripts/kconfig/expr.c 	if (e) switch (e->type) {
e                 323 scripts/kconfig/expr.c 		e->left.expr = expr_eliminate_yn(e->left.expr);
e                 324 scripts/kconfig/expr.c 		e->right.expr = expr_eliminate_yn(e->right.expr);
e                 325 scripts/kconfig/expr.c 		if (e->left.expr->type == E_SYMBOL) {
e                 326 scripts/kconfig/expr.c 			if (e->left.expr->left.sym == &symbol_no) {
e                 327 scripts/kconfig/expr.c 				expr_free(e->left.expr);
e                 328 scripts/kconfig/expr.c 				expr_free(e->right.expr);
e                 329 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 330 scripts/kconfig/expr.c 				e->left.sym = &symbol_no;
e                 331 scripts/kconfig/expr.c 				e->right.expr = NULL;
e                 332 scripts/kconfig/expr.c 				return e;
e                 333 scripts/kconfig/expr.c 			} else if (e->left.expr->left.sym == &symbol_yes) {
e                 334 scripts/kconfig/expr.c 				free(e->left.expr);
e                 335 scripts/kconfig/expr.c 				tmp = e->right.expr;
e                 336 scripts/kconfig/expr.c 				*e = *(e->right.expr);
e                 338 scripts/kconfig/expr.c 				return e;
e                 341 scripts/kconfig/expr.c 		if (e->right.expr->type == E_SYMBOL) {
e                 342 scripts/kconfig/expr.c 			if (e->right.expr->left.sym == &symbol_no) {
e                 343 scripts/kconfig/expr.c 				expr_free(e->left.expr);
e                 344 scripts/kconfig/expr.c 				expr_free(e->right.expr);
e                 345 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 346 scripts/kconfig/expr.c 				e->left.sym = &symbol_no;
e                 347 scripts/kconfig/expr.c 				e->right.expr = NULL;
e                 348 scripts/kconfig/expr.c 				return e;
e                 349 scripts/kconfig/expr.c 			} else if (e->right.expr->left.sym == &symbol_yes) {
e                 350 scripts/kconfig/expr.c 				free(e->right.expr);
e                 351 scripts/kconfig/expr.c 				tmp = e->left.expr;
e                 352 scripts/kconfig/expr.c 				*e = *(e->left.expr);
e                 354 scripts/kconfig/expr.c 				return e;
e                 359 scripts/kconfig/expr.c 		e->left.expr = expr_eliminate_yn(e->left.expr);
e                 360 scripts/kconfig/expr.c 		e->right.expr = expr_eliminate_yn(e->right.expr);
e                 361 scripts/kconfig/expr.c 		if (e->left.expr->type == E_SYMBOL) {
e                 362 scripts/kconfig/expr.c 			if (e->left.expr->left.sym == &symbol_no) {
e                 363 scripts/kconfig/expr.c 				free(e->left.expr);
e                 364 scripts/kconfig/expr.c 				tmp = e->right.expr;
e                 365 scripts/kconfig/expr.c 				*e = *(e->right.expr);
e                 367 scripts/kconfig/expr.c 				return e;
e                 368 scripts/kconfig/expr.c 			} else if (e->left.expr->left.sym == &symbol_yes) {
e                 369 scripts/kconfig/expr.c 				expr_free(e->left.expr);
e                 370 scripts/kconfig/expr.c 				expr_free(e->right.expr);
e                 371 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 372 scripts/kconfig/expr.c 				e->left.sym = &symbol_yes;
e                 373 scripts/kconfig/expr.c 				e->right.expr = NULL;
e                 374 scripts/kconfig/expr.c 				return e;
e                 377 scripts/kconfig/expr.c 		if (e->right.expr->type == E_SYMBOL) {
e                 378 scripts/kconfig/expr.c 			if (e->right.expr->left.sym == &symbol_no) {
e                 379 scripts/kconfig/expr.c 				free(e->right.expr);
e                 380 scripts/kconfig/expr.c 				tmp = e->left.expr;
e                 381 scripts/kconfig/expr.c 				*e = *(e->left.expr);
e                 383 scripts/kconfig/expr.c 				return e;
e                 384 scripts/kconfig/expr.c 			} else if (e->right.expr->left.sym == &symbol_yes) {
e                 385 scripts/kconfig/expr.c 				expr_free(e->left.expr);
e                 386 scripts/kconfig/expr.c 				expr_free(e->right.expr);
e                 387 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 388 scripts/kconfig/expr.c 				e->left.sym = &symbol_yes;
e                 389 scripts/kconfig/expr.c 				e->right.expr = NULL;
e                 390 scripts/kconfig/expr.c 				return e;
e                 397 scripts/kconfig/expr.c 	return e;
e                 403 scripts/kconfig/expr.c struct expr *expr_trans_bool(struct expr *e)
e                 405 scripts/kconfig/expr.c 	if (!e)
e                 407 scripts/kconfig/expr.c 	switch (e->type) {
e                 411 scripts/kconfig/expr.c 		e->left.expr = expr_trans_bool(e->left.expr);
e                 412 scripts/kconfig/expr.c 		e->right.expr = expr_trans_bool(e->right.expr);
e                 416 scripts/kconfig/expr.c 		if (e->left.sym->type == S_TRISTATE) {
e                 417 scripts/kconfig/expr.c 			if (e->right.sym == &symbol_no) {
e                 418 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 419 scripts/kconfig/expr.c 				e->right.sym = NULL;
e                 426 scripts/kconfig/expr.c 	return e;
e                 665 scripts/kconfig/expr.c struct expr *expr_eliminate_dups(struct expr *e)
e                 668 scripts/kconfig/expr.c 	if (!e)
e                 669 scripts/kconfig/expr.c 		return e;
e                 674 scripts/kconfig/expr.c 		switch (e->type) {
e                 676 scripts/kconfig/expr.c 			expr_eliminate_dups1(e->type, &e, &e);
e                 683 scripts/kconfig/expr.c 		e = expr_eliminate_yn(e);
e                 686 scripts/kconfig/expr.c 	return e;
e                 695 scripts/kconfig/expr.c struct expr *expr_transform(struct expr *e)
e                 699 scripts/kconfig/expr.c 	if (!e)
e                 701 scripts/kconfig/expr.c 	switch (e->type) {
e                 712 scripts/kconfig/expr.c 		e->left.expr = expr_transform(e->left.expr);
e                 713 scripts/kconfig/expr.c 		e->right.expr = expr_transform(e->right.expr);
e                 716 scripts/kconfig/expr.c 	switch (e->type) {
e                 718 scripts/kconfig/expr.c 		if (e->left.sym->type != S_BOOLEAN)
e                 720 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_no) {
e                 721 scripts/kconfig/expr.c 			e->type = E_NOT;
e                 722 scripts/kconfig/expr.c 			e->left.expr = expr_alloc_symbol(e->left.sym);
e                 723 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 726 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_mod) {
e                 727 scripts/kconfig/expr.c 			printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", e->left.sym->name);
e                 728 scripts/kconfig/expr.c 			e->type = E_SYMBOL;
e                 729 scripts/kconfig/expr.c 			e->left.sym = &symbol_no;
e                 730 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 733 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_yes) {
e                 734 scripts/kconfig/expr.c 			e->type = E_SYMBOL;
e                 735 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 740 scripts/kconfig/expr.c 		if (e->left.sym->type != S_BOOLEAN)
e                 742 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_no) {
e                 743 scripts/kconfig/expr.c 			e->type = E_SYMBOL;
e                 744 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 747 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_mod) {
e                 748 scripts/kconfig/expr.c 			printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", e->left.sym->name);
e                 749 scripts/kconfig/expr.c 			e->type = E_SYMBOL;
e                 750 scripts/kconfig/expr.c 			e->left.sym = &symbol_yes;
e                 751 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 754 scripts/kconfig/expr.c 		if (e->right.sym == &symbol_yes) {
e                 755 scripts/kconfig/expr.c 			e->type = E_NOT;
e                 756 scripts/kconfig/expr.c 			e->left.expr = expr_alloc_symbol(e->left.sym);
e                 757 scripts/kconfig/expr.c 			e->right.sym = NULL;
e                 762 scripts/kconfig/expr.c 		switch (e->left.expr->type) {
e                 765 scripts/kconfig/expr.c 			tmp = e->left.expr->left.expr;
e                 766 scripts/kconfig/expr.c 			free(e->left.expr);
e                 767 scripts/kconfig/expr.c 			free(e);
e                 768 scripts/kconfig/expr.c 			e = tmp;
e                 769 scripts/kconfig/expr.c 			e = expr_transform(e);
e                 774 scripts/kconfig/expr.c 			tmp = e->left.expr;
e                 775 scripts/kconfig/expr.c 			free(e);
e                 776 scripts/kconfig/expr.c 			e = tmp;
e                 777 scripts/kconfig/expr.c 			e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL;
e                 782 scripts/kconfig/expr.c 			tmp = e->left.expr;
e                 783 scripts/kconfig/expr.c 			free(e);
e                 784 scripts/kconfig/expr.c 			e = tmp;
e                 785 scripts/kconfig/expr.c 			e->type = e->type == E_LEQ ? E_GTH : E_LTH;
e                 790 scripts/kconfig/expr.c 			tmp = e->left.expr;
e                 791 scripts/kconfig/expr.c 			free(e);
e                 792 scripts/kconfig/expr.c 			e = tmp;
e                 793 scripts/kconfig/expr.c 			e->type = e->type == E_LTH ? E_GEQ : E_LEQ;
e                 797 scripts/kconfig/expr.c 			tmp = e->left.expr;
e                 798 scripts/kconfig/expr.c 			e->type = E_AND;
e                 799 scripts/kconfig/expr.c 			e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
e                 802 scripts/kconfig/expr.c 			e = expr_transform(e);
e                 806 scripts/kconfig/expr.c 			tmp = e->left.expr;
e                 807 scripts/kconfig/expr.c 			e->type = E_OR;
e                 808 scripts/kconfig/expr.c 			e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
e                 811 scripts/kconfig/expr.c 			e = expr_transform(e);
e                 814 scripts/kconfig/expr.c 			if (e->left.expr->left.sym == &symbol_yes) {
e                 816 scripts/kconfig/expr.c 				tmp = e->left.expr;
e                 817 scripts/kconfig/expr.c 				free(e);
e                 818 scripts/kconfig/expr.c 				e = tmp;
e                 819 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 820 scripts/kconfig/expr.c 				e->left.sym = &symbol_no;
e                 823 scripts/kconfig/expr.c 			if (e->left.expr->left.sym == &symbol_mod) {
e                 825 scripts/kconfig/expr.c 				tmp = e->left.expr;
e                 826 scripts/kconfig/expr.c 				free(e);
e                 827 scripts/kconfig/expr.c 				e = tmp;
e                 828 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 829 scripts/kconfig/expr.c 				e->left.sym = &symbol_mod;
e                 832 scripts/kconfig/expr.c 			if (e->left.expr->left.sym == &symbol_no) {
e                 834 scripts/kconfig/expr.c 				tmp = e->left.expr;
e                 835 scripts/kconfig/expr.c 				free(e);
e                 836 scripts/kconfig/expr.c 				e = tmp;
e                 837 scripts/kconfig/expr.c 				e->type = E_SYMBOL;
e                 838 scripts/kconfig/expr.c 				e->left.sym = &symbol_yes;
e                 849 scripts/kconfig/expr.c 	return e;
e                 923 scripts/kconfig/expr.c struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym)
e                 927 scripts/kconfig/expr.c 	if (!e) {
e                 928 scripts/kconfig/expr.c 		e = expr_alloc_symbol(sym);
e                 930 scripts/kconfig/expr.c 			e = expr_alloc_one(E_NOT, e);
e                 931 scripts/kconfig/expr.c 		return e;
e                 933 scripts/kconfig/expr.c 	switch (e->type) {
e                 935 scripts/kconfig/expr.c 		e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym);
e                 936 scripts/kconfig/expr.c 		e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym);
e                 938 scripts/kconfig/expr.c 			e = expr_alloc_two(E_AND, e1, e2);
e                 940 scripts/kconfig/expr.c 			e = expr_alloc_two(E_OR, e1, e2);
e                 942 scripts/kconfig/expr.c 			e = expr_alloc_one(E_NOT, e);
e                 943 scripts/kconfig/expr.c 		return e;
e                 945 scripts/kconfig/expr.c 		e1 = expr_trans_compare(e->left.expr, E_EQUAL, sym);
e                 946 scripts/kconfig/expr.c 		e2 = expr_trans_compare(e->right.expr, E_EQUAL, sym);
e                 948 scripts/kconfig/expr.c 			e = expr_alloc_two(E_OR, e1, e2);
e                 950 scripts/kconfig/expr.c 			e = expr_alloc_two(E_AND, e1, e2);
e                 952 scripts/kconfig/expr.c 			e = expr_alloc_one(E_NOT, e);
e                 953 scripts/kconfig/expr.c 		return e;
e                 955 scripts/kconfig/expr.c 		return expr_trans_compare(e->left.expr, type == E_EQUAL ? E_UNEQUAL : E_EQUAL, sym);
e                 964 scripts/kconfig/expr.c 				return expr_copy(e);
e                 968 scripts/kconfig/expr.c 				return expr_alloc_one(E_NOT, expr_copy(e));
e                 971 scripts/kconfig/expr.c 				return expr_alloc_one(E_NOT, expr_copy(e));
e                 975 scripts/kconfig/expr.c 				return expr_copy(e);
e                 979 scripts/kconfig/expr.c 		return expr_alloc_comp(type, e->left.sym, sym);
e                1031 scripts/kconfig/expr.c tristate expr_calc_value(struct expr *e)
e                1039 scripts/kconfig/expr.c 	if (!e)
e                1042 scripts/kconfig/expr.c 	switch (e->type) {
e                1044 scripts/kconfig/expr.c 		sym_calc_value(e->left.sym);
e                1045 scripts/kconfig/expr.c 		return e->left.sym->curr.tri;
e                1047 scripts/kconfig/expr.c 		val1 = expr_calc_value(e->left.expr);
e                1048 scripts/kconfig/expr.c 		val2 = expr_calc_value(e->right.expr);
e                1051 scripts/kconfig/expr.c 		val1 = expr_calc_value(e->left.expr);
e                1052 scripts/kconfig/expr.c 		val2 = expr_calc_value(e->right.expr);
e                1055 scripts/kconfig/expr.c 		val1 = expr_calc_value(e->left.expr);
e                1065 scripts/kconfig/expr.c 		printf("expr_calc_value: %d?\n", e->type);
e                1069 scripts/kconfig/expr.c 	sym_calc_value(e->left.sym);
e                1070 scripts/kconfig/expr.c 	sym_calc_value(e->right.sym);
e                1071 scripts/kconfig/expr.c 	str1 = sym_get_string_value(e->left.sym);
e                1072 scripts/kconfig/expr.c 	str2 = sym_get_string_value(e->right.sym);
e                1074 scripts/kconfig/expr.c 	if (e->left.sym->type != S_STRING || e->right.sym->type != S_STRING) {
e                1075 scripts/kconfig/expr.c 		k1 = expr_parse_string(str1, e->left.sym->type, &lval);
e                1076 scripts/kconfig/expr.c 		k2 = expr_parse_string(str2, e->right.sym->type, &rval);
e                1086 scripts/kconfig/expr.c 	switch(e->type) {
e                1100 scripts/kconfig/expr.c 		printf("expr_calc_value: relation %d?\n", e->type);
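
expr_calc_value() evaluates the tree over the tristate lattice no(0) < mod(1) < yes(2): AND takes the minimum, OR the maximum, and NOT reflects through 2 - v, which is exactly what expr.h's EXPR_AND/EXPR_OR/EXPR_NOT macros compute. A standalone check:

#include <stdio.h>

enum tristate { no = 0, mod = 1, yes = 2 };

#define EXPR_AND(a, b)	((a) < (b) ? (a) : (b))
#define EXPR_OR(a, b)	((a) > (b) ? (a) : (b))
#define EXPR_NOT(a)	(2 - (a))

int main(void)
{
	printf("%d\n", EXPR_AND(yes, mod));	/* 1: y && m == m */
	printf("%d\n", EXPR_OR(no, mod));	/* 1: n || m == m */
	printf("%d\n", EXPR_NOT(mod));		/* 1: !m == m */
	return 0;
}
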
e                1139 scripts/kconfig/expr.c void expr_print(struct expr *e,
e                1143 scripts/kconfig/expr.c 	if (!e) {
e                1148 scripts/kconfig/expr.c 	if (expr_compare_type(prevtoken, e->type) > 0)
e                1150 scripts/kconfig/expr.c 	switch (e->type) {
e                1152 scripts/kconfig/expr.c 		if (e->left.sym->name)
e                1153 scripts/kconfig/expr.c 			fn(data, e->left.sym, e->left.sym->name);
e                1159 scripts/kconfig/expr.c 		expr_print(e->left.expr, fn, data, E_NOT);
e                1162 scripts/kconfig/expr.c 		if (e->left.sym->name)
e                1163 scripts/kconfig/expr.c 			fn(data, e->left.sym, e->left.sym->name);
e                1167 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1171 scripts/kconfig/expr.c 		if (e->left.sym->name)
e                1172 scripts/kconfig/expr.c 			fn(data, e->left.sym, e->left.sym->name);
e                1175 scripts/kconfig/expr.c 		fn(data, NULL, e->type == E_LEQ ? "<=" : "<");
e                1176 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1180 scripts/kconfig/expr.c 		if (e->left.sym->name)
e                1181 scripts/kconfig/expr.c 			fn(data, e->left.sym, e->left.sym->name);
e                1184 scripts/kconfig/expr.c 		fn(data, NULL, e->type == E_GEQ ? ">=" : ">");
e                1185 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1188 scripts/kconfig/expr.c 		if (e->left.sym->name)
e                1189 scripts/kconfig/expr.c 			fn(data, e->left.sym, e->left.sym->name);
e                1193 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1196 scripts/kconfig/expr.c 		expr_print(e->left.expr, fn, data, E_OR);
e                1198 scripts/kconfig/expr.c 		expr_print(e->right.expr, fn, data, E_OR);
e                1201 scripts/kconfig/expr.c 		expr_print(e->left.expr, fn, data, E_AND);
e                1203 scripts/kconfig/expr.c 		expr_print(e->right.expr, fn, data, E_AND);
e                1206 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1207 scripts/kconfig/expr.c 		if (e->left.expr) {
e                1209 scripts/kconfig/expr.c 			expr_print(e->left.expr, fn, data, E_LIST);
e                1214 scripts/kconfig/expr.c 		fn(data, e->left.sym, e->left.sym->name);
e                1216 scripts/kconfig/expr.c 		fn(data, e->right.sym, e->right.sym->name);
e                1222 scripts/kconfig/expr.c 		sprintf(buf, "<unknown type %d>", e->type);
e                1227 scripts/kconfig/expr.c 	if (expr_compare_type(prevtoken, e->type) > 0)
e                1236 scripts/kconfig/expr.c void expr_fprint(struct expr *e, FILE *out)
e                1238 scripts/kconfig/expr.c 	expr_print(e, expr_print_file_helper, out, E_NONE);
e                1271 scripts/kconfig/expr.c void expr_gstr_print(struct expr *e, struct gstr *gs)
e                1273 scripts/kconfig/expr.c 	expr_print(e, expr_print_gstr_helper, gs, E_NONE);
e                1281 scripts/kconfig/expr.c static void expr_print_revdep(struct expr *e,
e                1285 scripts/kconfig/expr.c 	if (e->type == E_OR) {
e                1286 scripts/kconfig/expr.c 		expr_print_revdep(e->left.expr, fn, data, pr_type, title);
e                1287 scripts/kconfig/expr.c 		expr_print_revdep(e->right.expr, fn, data, pr_type, title);
e                1288 scripts/kconfig/expr.c 	} else if (expr_calc_value(e) == pr_type) {
e                1295 scripts/kconfig/expr.c 		expr_print(e, fn, data, E_NONE);
e                1300 scripts/kconfig/expr.c void expr_gstr_print_revdep(struct expr *e, struct gstr *gs,
e                1303 scripts/kconfig/expr.c 	expr_print_revdep(e, expr_print_gstr_helper, gs, pr_type, &title);
e                  51 scripts/kconfig/expr.h #define expr_list_for_each_sym(l, e, s) \
e                  52 scripts/kconfig/expr.h 	for (e = (l); e && (s = e->right.sym); e = e->left.expr)
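
The macro just above walks an E_LIST chain: each node stores a symbol in right.sym and the list tail in left.expr, so iteration stops when either runs out. Typical call-site shape, as in the confdata.c and symbol.c sites listed here (fragment; assumes kconfig's headers):

struct expr *e;
struct symbol *sym;

expr_list_for_each_sym(prop->expr, e, sym)
	printf("choice value: %s\n", sym->name);
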
e                 302 scripts/kconfig/expr.h void expr_free(struct expr *e);
e                 304 scripts/kconfig/expr.h tristate expr_calc_value(struct expr *e);
e                 305 scripts/kconfig/expr.h struct expr *expr_trans_bool(struct expr *e);
e                 306 scripts/kconfig/expr.h struct expr *expr_eliminate_dups(struct expr *e);
e                 307 scripts/kconfig/expr.h struct expr *expr_transform(struct expr *e);
e                 310 scripts/kconfig/expr.h struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym);
e                 312 scripts/kconfig/expr.h void expr_fprint(struct expr *e, FILE *out);
e                 314 scripts/kconfig/expr.h void expr_gstr_print(struct expr *e, struct gstr *gs);
e                 315 scripts/kconfig/expr.h void expr_gstr_print_revdep(struct expr *e, struct gstr *gs,
e                 318 scripts/kconfig/expr.h static inline int expr_is_yes(struct expr *e)
e                 320 scripts/kconfig/expr.h 	return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
e                 323 scripts/kconfig/expr.h static inline int expr_is_no(struct expr *e)
e                 325 scripts/kconfig/expr.h 	return e && (e->type == E_SYMBOL && e->left.sym == &symbol_no);
e                  65 scripts/kconfig/lkc_proto.h void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken);
e                  81 scripts/kconfig/menu.c static struct expr *rewrite_m(struct expr *e)
e                  83 scripts/kconfig/menu.c 	if (!e)
e                  84 scripts/kconfig/menu.c 		return e;
e                  86 scripts/kconfig/menu.c 	switch (e->type) {
e                  88 scripts/kconfig/menu.c 		e->left.expr = rewrite_m(e->left.expr);
e                  92 scripts/kconfig/menu.c 		e->left.expr = rewrite_m(e->left.expr);
e                  93 scripts/kconfig/menu.c 		e->right.expr = rewrite_m(e->right.expr);
e                  97 scripts/kconfig/menu.c 		if (e->left.sym == &symbol_mod)
e                  98 scripts/kconfig/menu.c 			return expr_alloc_and(e, expr_alloc_symbol(modules_sym));
e                 103 scripts/kconfig/menu.c 	return e;
e                  46 scripts/kconfig/preprocess.c 	struct env *e;
e                  48 scripts/kconfig/preprocess.c 	e = xmalloc(sizeof(*e));
e                  49 scripts/kconfig/preprocess.c 	e->name = xstrdup(name);
e                  50 scripts/kconfig/preprocess.c 	e->value = xstrdup(value);
e                  52 scripts/kconfig/preprocess.c 	list_add_tail(&e->node, &env_list);
e                  55 scripts/kconfig/preprocess.c static void env_del(struct env *e)
e                  57 scripts/kconfig/preprocess.c 	list_del(&e->node);
e                  58 scripts/kconfig/preprocess.c 	free(e->name);
e                  59 scripts/kconfig/preprocess.c 	free(e->value);
e                  60 scripts/kconfig/preprocess.c 	free(e);
e                  66 scripts/kconfig/preprocess.c 	struct env *e;
e                  72 scripts/kconfig/preprocess.c 	list_for_each_entry(e, &env_list, node) {
e                  73 scripts/kconfig/preprocess.c 		if (!strcmp(name, e->name))
e                  74 scripts/kconfig/preprocess.c 			return xstrdup(e->value);
e                  92 scripts/kconfig/preprocess.c 	struct env *e, *tmp;
e                  94 scripts/kconfig/preprocess.c 	list_for_each_entry_safe(e, tmp, &env_list, node) {
e                  95 scripts/kconfig/preprocess.c 		fprintf(f, "ifneq \"$(%s)\" \"%s\"\n", e->name, e->value);
e                  98 scripts/kconfig/preprocess.c 		env_del(e);
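
preprocess.c records each environment variable the Kconfig files expanded, so it can emit Makefile dependency checks ("ifneq ...") and free the records afterwards. The add/lookup half of that cycle as a standalone sketch (the kernel version chains through its own list.h instead of a next pointer):

#include <stdlib.h>
#include <string.h>

struct env {
	char *name;
	char *value;
	struct env *next;
};

static struct env *env_list;

static void env_add(const char *name, const char *value)
{
	struct env *e = malloc(sizeof(*e));

	e->name = strdup(name);
	e->value = strdup(value);
	e->next = env_list;
	env_list = e;
}

static const char *env_get(const char *name)
{
	struct env *e;

	for (e = env_list; e; e = e->next)
		if (!strcmp(name, e->name))
			return e->value;
	return NULL;
}
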
e                 284 scripts/kconfig/qconf.cc void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
e                 286 scripts/kconfig/qconf.cc 	switch (e->key()) {
e                 295 scripts/kconfig/qconf.cc 		Parent::keyPressEvent(e);
e                 298 scripts/kconfig/qconf.cc 	e->accept();
e                 761 scripts/kconfig/qconf.cc void ConfigList::mousePressEvent(QMouseEvent* e)
e                 765 scripts/kconfig/qconf.cc 	Parent::mousePressEvent(e);
e                 768 scripts/kconfig/qconf.cc void ConfigList::mouseReleaseEvent(QMouseEvent* e)
e                 770 scripts/kconfig/qconf.cc 	QPoint p = e->pos();
e                 819 scripts/kconfig/qconf.cc 	Parent::mouseReleaseEvent(e);
e                 822 scripts/kconfig/qconf.cc void ConfigList::mouseMoveEvent(QMouseEvent* e)
e                 826 scripts/kconfig/qconf.cc 	Parent::mouseMoveEvent(e);
e                 829 scripts/kconfig/qconf.cc void ConfigList::mouseDoubleClickEvent(QMouseEvent* e)
e                 831 scripts/kconfig/qconf.cc 	QPoint p = e->pos(); // TODO: Check if this works(was contentsToViewport).
e                 853 scripts/kconfig/qconf.cc 	Parent::mouseDoubleClickEvent(e);
e                 856 scripts/kconfig/qconf.cc void ConfigList::focusInEvent(QFocusEvent *e)
e                 860 scripts/kconfig/qconf.cc 	Parent::focusInEvent(e);
e                 870 scripts/kconfig/qconf.cc void ConfigList::contextMenuEvent(QContextMenuEvent *e)
e                 872 scripts/kconfig/qconf.cc 	if (e->y() <= header()->geometry().bottom()) {
e                 902 scripts/kconfig/qconf.cc 		headerPopup->exec(e->globalPos());
e                 903 scripts/kconfig/qconf.cc 		e->accept();
e                 905 scripts/kconfig/qconf.cc 		e->ignore();
e                1238 scripts/kconfig/qconf.cc void ConfigInfoView::contextMenuEvent(QContextMenuEvent *e)
e                1240 scripts/kconfig/qconf.cc 	Parent::contextMenuEvent(e);
e                1741 scripts/kconfig/qconf.cc void ConfigMainWindow::closeEvent(QCloseEvent* e)
e                1744 scripts/kconfig/qconf.cc 		e->accept();
e                1755 scripts/kconfig/qconf.cc 			e->accept();
e                1757 scripts/kconfig/qconf.cc 			e->ignore();
e                1760 scripts/kconfig/qconf.cc 		e->accept();
e                1763 scripts/kconfig/qconf.cc 		e->ignore();
e                  55 scripts/kconfig/qconf.h 	void keyPressEvent(QKeyEvent *e);
e                  56 scripts/kconfig/qconf.h 	void mousePressEvent(QMouseEvent *e);
e                  57 scripts/kconfig/qconf.h 	void mouseReleaseEvent(QMouseEvent *e);
e                  58 scripts/kconfig/qconf.h 	void mouseMoveEvent(QMouseEvent *e);
e                  59 scripts/kconfig/qconf.h 	void mouseDoubleClickEvent(QMouseEvent *e);
e                  60 scripts/kconfig/qconf.h 	void focusInEvent(QFocusEvent *e);
e                  61 scripts/kconfig/qconf.h 	void contextMenuEvent(QContextMenuEvent *e);
e                 202 scripts/kconfig/qconf.h 	void keyPressEvent(QKeyEvent *e);
e                 264 scripts/kconfig/qconf.h 	void contextMenuEvent(QContextMenuEvent *e);
e                 316 scripts/kconfig/qconf.h 	void closeEvent(QCloseEvent *e);
e                 244 scripts/kconfig/symbol.c 	struct expr *e;
e                 258 scripts/kconfig/symbol.c 	expr_list_for_each_sym(prop->expr, e, def_sym)
e                 270 scripts/kconfig/symbol.c 	struct expr *e;
e                 276 scripts/kconfig/symbol.c 	expr_list_for_each_sym(prop->expr, e, def_sym) {
e                 323 scripts/kconfig/symbol.c 	struct expr *e;
e                 446 scripts/kconfig/symbol.c 		expr_list_for_each_sym(prop->expr, e, choice_sym) {
e                 512 scripts/kconfig/symbol.c 		struct expr *e;
e                 517 scripts/kconfig/symbol.c 		for (e = prop->expr; e; e = e->left.expr) {
e                 518 scripts/kconfig/symbol.c 			if (e->right.sym->visible != no)
e                 519 scripts/kconfig/symbol.c 				e->right.sym->flags |= SYMBOL_DEF_USER;
e                1125 scripts/kconfig/symbol.c static struct symbol *sym_check_expr_deps(struct expr *e)
e                1129 scripts/kconfig/symbol.c 	if (!e)
e                1131 scripts/kconfig/symbol.c 	switch (e->type) {
e                1134 scripts/kconfig/symbol.c 		sym = sym_check_expr_deps(e->left.expr);
e                1137 scripts/kconfig/symbol.c 		return sym_check_expr_deps(e->right.expr);
e                1139 scripts/kconfig/symbol.c 		return sym_check_expr_deps(e->left.expr);
e                1146 scripts/kconfig/symbol.c 		sym = sym_check_deps(e->left.sym);
e                1149 scripts/kconfig/symbol.c 		return sym_check_deps(e->right.sym);
e                1151 scripts/kconfig/symbol.c 		return sym_check_deps(e->left.sym);
e                1155 scripts/kconfig/symbol.c 	fprintf(stderr, "Oops! How to check %d?\n", e->type);
e                1212 scripts/kconfig/symbol.c 	struct expr *e;
e                1218 scripts/kconfig/symbol.c 	expr_list_for_each_sym(prop->expr, e, sym)
e                1227 scripts/kconfig/symbol.c 	expr_list_for_each_sym(prop->expr, e, sym) {
e                1233 scripts/kconfig/symbol.c 	expr_list_for_each_sym(prop->expr, e, sym)
e                 749 scripts/mod/modpost.c 			const char *e = is_vmlinux(mod->name) ?"":".ko";
e                 751 scripts/mod/modpost.c 			     symname + strlen("__crc_"), mod->name, e);
e                2154 scripts/mod/modpost.c 	const char *e = is_vmlinux(m) ?"":".ko";
e                2159 scripts/mod/modpost.c 		      "uses GPL-only symbol '%s'\n", m, e, s);
e                2163 scripts/mod/modpost.c 		      "uses GPL-only symbol marked UNUSED '%s'\n", m, e, s);
e                2167 scripts/mod/modpost.c 		      "uses future GPL-only symbol '%s'\n", m, e, s);
e                2179 scripts/mod/modpost.c 	const char *e = is_vmlinux(m) ?"":".ko";
e                2185 scripts/mod/modpost.c 		      "uses symbol '%s' marked UNUSED\n", m, e, s);
e                  83 scripts/sign-file.c 	int e, line;
e                  89 scripts/sign-file.c 	while ((e = ERR_get_error_line(&file, &line))) {
e                  90 scripts/sign-file.c 		ERR_error_string(e, buf);
e                 140 scripts/sign-file.c 		ENGINE *e;
e                 144 scripts/sign-file.c 		e = ENGINE_by_id("pkcs11");
e                 145 scripts/sign-file.c 		ERR(!e, "Load PKCS#11 ENGINE");
e                 146 scripts/sign-file.c 		if (ENGINE_init(e))
e                 151 scripts/sign-file.c 			ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0),
e                 153 scripts/sign-file.c 		private_key = ENGINE_load_private_key(e, private_key_name,
e                 281 security/apparmor/file.c 	int e = 0;
e                 287 security/apparmor/file.c 		e = -EACCES;
e                 289 security/apparmor/file.c 			     cond->uid, NULL, e);
e                  94 security/apparmor/include/perms.h 	int e, error = FN1;	\
e                  95 security/apparmor/include/perms.h 	e = FN2;		\
e                  96 security/apparmor/include/perms.h 	if (e)			\
e                  97 security/apparmor/include/perms.h 		error = e;	\
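
The perms.h macro above folds two checks into a single error: the first check's result is kept unless the second check also failed, in which case the later error wins. Expanded as a plain function:

static int check_both(int (*fn1)(void), int (*fn2)(void))
{
	int e, error = fn1();

	e = fn2();
	if (e)
		error = e;	/* a failing second check overrides */
	return error;
}
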
e                 105 security/apparmor/policy_unpack.c 		       const char *name, const char *info, struct aa_ext *e,
e                 110 security/apparmor/policy_unpack.c 	if (e)
e                 111 security/apparmor/policy_unpack.c 		aad(&sa)->iface.pos = e->pos - e->start;
e                 198 security/apparmor/policy_unpack.c static bool inbounds(struct aa_ext *e, size_t size)
e                 200 security/apparmor/policy_unpack.c 	return (size <= e->end - e->pos);
e                 219 security/apparmor/policy_unpack.c static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
e                 222 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 224 security/apparmor/policy_unpack.c 	if (!inbounds(e, sizeof(u16)))
e                 226 security/apparmor/policy_unpack.c 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
e                 227 security/apparmor/policy_unpack.c 	e->pos += sizeof(__le16);
e                 228 security/apparmor/policy_unpack.c 	if (!inbounds(e, size))
e                 230 security/apparmor/policy_unpack.c 	*chunk = e->pos;
e                 231 security/apparmor/policy_unpack.c 	e->pos += size;
e                 235 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 240 security/apparmor/policy_unpack.c static bool unpack_X(struct aa_ext *e, enum aa_code code)
e                 242 security/apparmor/policy_unpack.c 	if (!inbounds(e, 1))
e                 244 security/apparmor/policy_unpack.c 	if (*(u8 *) e->pos != code)
e                 246 security/apparmor/policy_unpack.c 	e->pos++;
e                 266 security/apparmor/policy_unpack.c static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
e                 271 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 276 security/apparmor/policy_unpack.c 	if (unpack_X(e, AA_NAME)) {
e                 278 security/apparmor/policy_unpack.c 		size_t size = unpack_u16_chunk(e, &tag);
e                 288 security/apparmor/policy_unpack.c 	if (unpack_X(e, code))
e                 292 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 296 security/apparmor/policy_unpack.c static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
e                 298 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 300 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_U8, name)) {
e                 301 security/apparmor/policy_unpack.c 		if (!inbounds(e, sizeof(u8)))
e                 304 security/apparmor/policy_unpack.c 			*data = get_unaligned((u8 *)e->pos);
e                 305 security/apparmor/policy_unpack.c 		e->pos += sizeof(u8);
e                 310 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 314 security/apparmor/policy_unpack.c static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
e                 316 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 318 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_U32, name)) {
e                 319 security/apparmor/policy_unpack.c 		if (!inbounds(e, sizeof(u32)))
e                 322 security/apparmor/policy_unpack.c 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e                 323 security/apparmor/policy_unpack.c 		e->pos += sizeof(u32);
e                 328 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 332 security/apparmor/policy_unpack.c static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
e                 334 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 336 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_U64, name)) {
e                 337 security/apparmor/policy_unpack.c 		if (!inbounds(e, sizeof(u64)))
e                 340 security/apparmor/policy_unpack.c 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
e                 341 security/apparmor/policy_unpack.c 		e->pos += sizeof(u64);
e                 346 security/apparmor/policy_unpack.c 	e->pos = pos;
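
Every primitive unpacker in policy_unpack.c is transactional: save e->pos, guard each advance with inbounds(), and restore the cursor on any failure so callers can probe for optional fields (unpack_nameX relies on exactly that rollback). Minimal standalone form; note this sketch skips the le*_to_cpu conversions the kernel versions apply:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct aa_ext { void *start, *end, *pos; };

static int inbounds(struct aa_ext *e, size_t size)
{
	return size <= (size_t)((char *)e->end - (char *)e->pos);
}

static int unpack_u32_raw(struct aa_ext *e, uint32_t *data)
{
	void *pos = e->pos;		/* remember for rollback */

	if (!inbounds(e, sizeof(uint32_t)))
		goto fail;
	memcpy(data, e->pos, sizeof(uint32_t));	/* unaligned-safe read */
	e->pos = (char *)e->pos + sizeof(uint32_t);
	return 1;
fail:
	e->pos = pos;			/* leave the stream untouched */
	return 0;
}
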
e                 350 security/apparmor/policy_unpack.c static size_t unpack_array(struct aa_ext *e, const char *name)
e                 352 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 354 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_ARRAY, name)) {
e                 356 security/apparmor/policy_unpack.c 		if (!inbounds(e, sizeof(u16)))
e                 358 security/apparmor/policy_unpack.c 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
e                 359 security/apparmor/policy_unpack.c 		e->pos += sizeof(u16);
e                 364 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 368 security/apparmor/policy_unpack.c static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
e                 370 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 372 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_BLOB, name)) {
e                 374 security/apparmor/policy_unpack.c 		if (!inbounds(e, sizeof(u32)))
e                 376 security/apparmor/policy_unpack.c 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e                 377 security/apparmor/policy_unpack.c 		e->pos += sizeof(u32);
e                 378 security/apparmor/policy_unpack.c 		if (inbounds(e, (size_t) size)) {
e                 379 security/apparmor/policy_unpack.c 			*blob = e->pos;
e                 380 security/apparmor/policy_unpack.c 			e->pos += size;
e                 386 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 390 security/apparmor/policy_unpack.c static int unpack_str(struct aa_ext *e, const char **string, const char *name)
e                 394 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 396 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRING, name)) {
e                 397 security/apparmor/policy_unpack.c 		size = unpack_u16_chunk(e, &src_str);
e                 409 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 413 security/apparmor/policy_unpack.c static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
e                 416 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 417 security/apparmor/policy_unpack.c 	int res = unpack_str(e, &tmp, name);
e                 425 security/apparmor/policy_unpack.c 		e->pos = pos;
e                 439 security/apparmor/policy_unpack.c static struct aa_dfa *unpack_dfa(struct aa_ext *e)
e                 445 security/apparmor/policy_unpack.c 	size = unpack_blob(e, &blob, "aadfa");
e                 452 security/apparmor/policy_unpack.c 		size_t sz = blob - (char *) e->start -
e                 453 security/apparmor/policy_unpack.c 			((e->pos - e->start) & 7);
e                 474 security/apparmor/policy_unpack.c static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
e                 476 security/apparmor/policy_unpack.c 	void *saved_pos = e->pos;
e                 479 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
e                 482 security/apparmor/policy_unpack.c 		size = unpack_array(e, NULL);
e                 494 security/apparmor/policy_unpack.c 			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
e                 530 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
e                 532 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 539 security/apparmor/policy_unpack.c 	e->pos = saved_pos;
e                 543 security/apparmor/policy_unpack.c static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
e                 545 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 547 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
e                 550 security/apparmor/policy_unpack.c 		size = unpack_array(e, NULL);
e                 556 security/apparmor/policy_unpack.c 			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
e                 559 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
e                 561 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 568 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 572 security/apparmor/policy_unpack.c static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
e                 574 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 577 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
e                 578 security/apparmor/policy_unpack.c 		size = unpack_array(e, NULL);
e                 588 security/apparmor/policy_unpack.c 			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
e                 590 security/apparmor/policy_unpack.c 			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
e                 592 security/apparmor/policy_unpack.c 			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
e                 595 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
e                 597 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 612 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 616 security/apparmor/policy_unpack.c static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
e                 618 security/apparmor/policy_unpack.c 	void *pos = e->pos;
e                 621 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
e                 624 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &tmp, NULL))
e                 628 security/apparmor/policy_unpack.c 		size = unpack_array(e, NULL);
e                 634 security/apparmor/policy_unpack.c 			if (!unpack_u64(e, &tmp2, NULL))
e                 638 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
e                 640 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 646 security/apparmor/policy_unpack.c 	e->pos = pos;
e                 671 security/apparmor/policy_unpack.c static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
e                 687 security/apparmor/policy_unpack.c 	if (!unpack_nameX(e, AA_STRUCT, "profile"))
e                 689 security/apparmor/policy_unpack.c 	if (!unpack_str(e, &name, NULL))
e                 709 security/apparmor/policy_unpack.c 	(void) unpack_str(e, &profile->rename, "rename");
e                 712 security/apparmor/policy_unpack.c 	(void) unpack_str(e, &profile->attach, "attach");
e                 715 security/apparmor/policy_unpack.c 	profile->xmatch = unpack_dfa(e);
e                 724 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &tmp, NULL)) {
e                 732 security/apparmor/policy_unpack.c 	(void) unpack_str(e, &profile->disconnected, "disconnected");
e                 735 security/apparmor/policy_unpack.c 	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
e                 740 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &tmp, NULL))
e                 744 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &tmp, NULL))
e                 746 security/apparmor/policy_unpack.c 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
e                 752 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &tmp, NULL))
e                 757 security/apparmor/policy_unpack.c 	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 761 security/apparmor/policy_unpack.c 	if (unpack_u32(e, &profile->path_flags, "path_flags"))
e                 769 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
e                 771 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
e                 773 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
e                 775 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
e                 779 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
e                 781 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
e                 783 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
e                 785 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
e                 787 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
e                 789 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 794 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
e                 796 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
e                 798 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
e                 800 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 804 security/apparmor/policy_unpack.c 	if (!unpack_xattrs(e, profile)) {
e                 809 security/apparmor/policy_unpack.c 	if (!unpack_rlimits(e, profile)) {
e                 814 security/apparmor/policy_unpack.c 	if (!unpack_secmark(e, profile)) {
e                 819 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
e                 822 security/apparmor/policy_unpack.c 		profile->policy.dfa = unpack_dfa(e);
e                 831 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &profile->policy.start[0], "start"))
e                 841 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
e                 847 security/apparmor/policy_unpack.c 	profile->file.dfa = unpack_dfa(e);
e                 854 security/apparmor/policy_unpack.c 		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
e                 864 security/apparmor/policy_unpack.c 	if (!unpack_trans_table(e, profile)) {
e                 869 security/apparmor/policy_unpack.c 	if (unpack_nameX(e, AA_STRUCT, "data")) {
e                 887 security/apparmor/policy_unpack.c 		while (unpack_strdup(e, &key, NULL)) {
e                 895 security/apparmor/policy_unpack.c 			data->size = unpack_blob(e, &data->data, NULL);
e                 907 security/apparmor/policy_unpack.c 		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
e                 913 security/apparmor/policy_unpack.c 	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
e                 925 security/apparmor/policy_unpack.c 	audit_iface(profile, NULL, name, info, e, error);
e                 939 security/apparmor/policy_unpack.c static int verify_header(struct aa_ext *e, int required, const char **ns)
e                 946 security/apparmor/policy_unpack.c 	if (!unpack_u32(e, &e->version, "version")) {
e                 949 security/apparmor/policy_unpack.c 				    e, error);
e                 958 security/apparmor/policy_unpack.c 	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
e                 960 security/apparmor/policy_unpack.c 			    e, error);
e                 965 security/apparmor/policy_unpack.c 	if (unpack_str(e, &name, "namespace")) {
e                 968 security/apparmor/policy_unpack.c 				    e, error);
e                 972 security/apparmor/policy_unpack.c 			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
e                1060 security/apparmor/policy_unpack.c 	struct aa_ext e = {
e                1067 security/apparmor/policy_unpack.c 	while (e.pos < e.end) {
e                1070 security/apparmor/policy_unpack.c 		error = verify_header(&e, e.pos == e.start, ns);
e                1074 security/apparmor/policy_unpack.c 		start = e.pos;
e                1075 security/apparmor/policy_unpack.c 		profile = unpack_profile(&e, &ns_name);
e                1086 security/apparmor/policy_unpack.c 			error = aa_calc_profile_hash(profile, e.version, start,
e                1087 security/apparmor/policy_unpack.c 						     e.pos - start);
e                1101 security/apparmor/policy_unpack.c 	udata->abi = e.version & K_ABI_MASK;
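
The AppArmor unpack helpers above (unpack_secmark, unpack_rlimits, unpack_profile) share one rewind-on-failure idiom: record e->pos on entry and restore it whenever an optional named element turns out to be absent, so probing for a structure never corrupts the read cursor. A minimal sketch of that idiom, assuming a simplified extent struct and a hypothetical try_tag() matcher in place of unpack_nameX():

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct ext {                    /* simplified stand-in for struct aa_ext */
        const char *pos;            /* current read cursor */
        const char *end;            /* one past the last valid byte */
    };

    /* Hypothetical matcher: consume "tag" if it is next in the stream. */
    static bool try_tag(struct ext *e, const char *tag)
    {
        size_t n = strlen(tag);

        if ((size_t)(e->end - e->pos) < n || memcmp(e->pos, tag, n))
            return false;
        e->pos += n;
        return true;
    }

    /* Optional element: rewind the cursor on failure, like unpack_secmark(). */
    static bool unpack_optional(struct ext *e)
    {
        const char *pos = e->pos;   /* save cursor on entry */

        if (try_tag(e, "secmark") && try_tag(e, "end"))
            return true;            /* fully parsed, keep the new cursor */

        e->pos = pos;               /* partial match: restore the cursor */
        return true;                /* a missing optional element is fine */
    }

    int main(void)
    {
        const char buf[] = "rlimits...";
        struct ext e = { buf, buf + sizeof(buf) - 1 };

        unpack_optional(&e);
        printf("cursor moved: %s\n", e.pos == buf ? "no" : "yes");
        return 0;
    }
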
e                  84 security/apparmor/resource.c 	int e = 0;
e                  88 security/apparmor/resource.c 		e = -EACCES;
e                  90 security/apparmor/resource.c 			      e);
e                 132 security/integrity/ima/ima_fs.c 	struct ima_template_entry *e;
e                 139 security/integrity/ima/ima_fs.c 	e = qe->entry;
e                 140 security/integrity/ima/ima_fs.c 	if (e == NULL)
e                 143 security/integrity/ima/ima_fs.c 	template_name = (e->template_desc->name[0] != '\0') ?
e                 144 security/integrity/ima/ima_fs.c 	    e->template_desc->name : e->template_desc->fmt;
e                 151 security/integrity/ima/ima_fs.c 	pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
e                 152 security/integrity/ima/ima_fs.c 	ima_putc(m, &pcr, sizeof(e->pcr));
e                 155 security/integrity/ima/ima_fs.c 	ima_putc(m, e->digest, TPM_DIGEST_SIZE);
e                 170 security/integrity/ima/ima_fs.c 		template_data_len = !ima_canonical_fmt ? e->template_data_len :
e                 171 security/integrity/ima/ima_fs.c 			cpu_to_le32(e->template_data_len);
e                 172 security/integrity/ima/ima_fs.c 		ima_putc(m, &template_data_len, sizeof(e->template_data_len));
e                 176 security/integrity/ima/ima_fs.c 	for (i = 0; i < e->template_desc->num_fields; i++) {
e                 179 security/integrity/ima/ima_fs.c 			e->template_desc->fields[i];
e                 185 security/integrity/ima/ima_fs.c 		field->field_show(m, show, &e->template_data[i]);
e                 222 security/integrity/ima/ima_fs.c 	struct ima_template_entry *e;
e                 227 security/integrity/ima/ima_fs.c 	e = qe->entry;
e                 228 security/integrity/ima/ima_fs.c 	if (e == NULL)
e                 231 security/integrity/ima/ima_fs.c 	template_name = (e->template_desc->name[0] != '\0') ?
e                 232 security/integrity/ima/ima_fs.c 	    e->template_desc->name : e->template_desc->fmt;
e                 235 security/integrity/ima/ima_fs.c 	seq_printf(m, "%2d ", e->pcr);
e                 238 security/integrity/ima/ima_fs.c 	ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
e                 244 security/integrity/ima/ima_fs.c 	for (i = 0; i < e->template_desc->num_fields; i++) {
e                 246 security/integrity/ima/ima_fs.c 		if (e->template_data[i].len == 0)
e                 249 security/integrity/ima/ima_fs.c 		e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
e                 250 security/integrity/ima/ima_fs.c 							&e->template_data[i]);
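
The ima_measurements_show() fragment above byte-swaps each field only when ima_canonical_fmt is set (pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr)), so a binary measurement list written on a big-endian host stays parseable anywhere. A minimal standalone sketch of that canonicalization; to_le32() is a portable stand-in for the kernel's cpu_to_le32():

    #include <stdint.h>
    #include <stdio.h>

    static int canonical_fmt = 1;   /* stand-in for the ima_canonical_fmt flag */

    /* Portable stand-in for the kernel's cpu_to_le32(). */
    static uint32_t to_le32(uint32_t v)
    {
        const union { uint16_t u; uint8_t b; } probe = { .u = 1 };

        if (probe.b)                /* little-endian host: already in order */
            return v;
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
    }

    int main(void)
    {
        uint32_t pcr = 10;
        /* Same shape as the ima_fs.c line: emit raw or canonical form. */
        uint32_t out = !canonical_fmt ? pcr : to_le32(pcr);

        fwrite(&out, sizeof(out), 1, stdout);
        return 0;
    }
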
e                 326 security/integrity/ima/ima_policy.c 	struct ima_rule_entry *entry, *e;
e                 329 security/integrity/ima/ima_policy.c 	list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
e                 242 security/selinux/ss/ebitmap.c int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
e                 246 security/selinux/ss/ebitmap.c 	if (e->highbit < bit)
e                 249 security/selinux/ss/ebitmap.c 	n = e->node;
e                 259 security/selinux/ss/ebitmap.c int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
e                 264 security/selinux/ss/ebitmap.c 	n = e->node;
e                 285 security/selinux/ss/ebitmap.c 						e->highbit = prev->startbit
e                 288 security/selinux/ss/ebitmap.c 						e->highbit = 0;
e                 293 security/selinux/ss/ebitmap.c 					e->node = n->next;
e                 314 security/selinux/ss/ebitmap.c 		e->highbit = new->startbit + EBITMAP_SIZE;
e                 320 security/selinux/ss/ebitmap.c 		new->next = e->node;
e                 321 security/selinux/ss/ebitmap.c 		e->node = new;
e                 327 security/selinux/ss/ebitmap.c void ebitmap_destroy(struct ebitmap *e)
e                 331 security/selinux/ss/ebitmap.c 	if (!e)
e                 334 security/selinux/ss/ebitmap.c 	n = e->node;
e                 341 security/selinux/ss/ebitmap.c 	e->highbit = 0;
e                 342 security/selinux/ss/ebitmap.c 	e->node = NULL;
e                 346 security/selinux/ss/ebitmap.c int ebitmap_read(struct ebitmap *e, void *fp)
e                 356 security/selinux/ss/ebitmap.c 	ebitmap_init(e);
e                 363 security/selinux/ss/ebitmap.c 	e->highbit = le32_to_cpu(buf[1]);
e                 369 security/selinux/ss/ebitmap.c 		       mapunit, BITS_PER_U64, e->highbit);
e                 374 security/selinux/ss/ebitmap.c 	e->highbit += EBITMAP_SIZE - 1;
e                 375 security/selinux/ss/ebitmap.c 	e->highbit -= (e->highbit % EBITMAP_SIZE);
e                 377 security/selinux/ss/ebitmap.c 	if (!e->highbit) {
e                 378 security/selinux/ss/ebitmap.c 		e->node = NULL;
e                 382 security/selinux/ss/ebitmap.c 	if (e->highbit && !count)
e                 399 security/selinux/ss/ebitmap.c 		if (startbit > e->highbit - mapunit) {
e                 402 security/selinux/ss/ebitmap.c 			       startbit, (e->highbit - mapunit));
e                 419 security/selinux/ss/ebitmap.c 				e->node = tmp;
e                 448 security/selinux/ss/ebitmap.c 	ebitmap_destroy(e);
e                 452 security/selinux/ss/ebitmap.c int ebitmap_write(struct ebitmap *e, void *fp)
e                 465 security/selinux/ss/ebitmap.c 	ebitmap_for_each_positive_bit(e, n, bit) {
e                 481 security/selinux/ss/ebitmap.c 	ebitmap_for_each_positive_bit(e, n, bit) {
e                  45 security/selinux/ss/ebitmap.h #define ebitmap_length(e) ((e)->highbit)
e                  47 security/selinux/ss/ebitmap.h static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
e                  52 security/selinux/ss/ebitmap.h 	for (*n = e->node; *n; *n = (*n)->next) {
e                  57 security/selinux/ss/ebitmap.h 	return ebitmap_length(e);
e                  60 security/selinux/ss/ebitmap.h static inline void ebitmap_init(struct ebitmap *e)
e                  62 security/selinux/ss/ebitmap.h 	memset(e, 0, sizeof(*e));
e                  65 security/selinux/ss/ebitmap.h static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
e                  80 security/selinux/ss/ebitmap.h 	return ebitmap_length(e);
e                 120 security/selinux/ss/ebitmap.h #define ebitmap_for_each_positive_bit(e, n, bit)	\
e                 121 security/selinux/ss/ebitmap.h 	for (bit = ebitmap_start_positive(e, &n);	\
e                 122 security/selinux/ss/ebitmap.h 	     bit < ebitmap_length(e);			\
e                 123 security/selinux/ss/ebitmap.h 	     bit = ebitmap_next_positive(e, &n, bit))	\
e                 128 security/selinux/ss/ebitmap.h int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
e                 129 security/selinux/ss/ebitmap.h int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
e                 130 security/selinux/ss/ebitmap.h void ebitmap_destroy(struct ebitmap *e);
e                 131 security/selinux/ss/ebitmap.h int ebitmap_read(struct ebitmap *e, void *fp);
e                 132 security/selinux/ss/ebitmap.h int ebitmap_write(struct ebitmap *e, void *fp);
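
Two details in the ebitmap fragments above are worth isolating: ebitmap_read() rounds highbit up to the next multiple of EBITMAP_SIZE with an add-then-trim pair, and ebitmap_for_each_positive_bit() visits only the set bits. A minimal sketch of both over a flat 64-bit map (the real ebitmap is a linked list of nodes; MAP_SIZE is a toy stand-in for EBITMAP_SIZE):

    #include <stdio.h>

    #define MAP_SIZE 64             /* toy stand-in for EBITMAP_SIZE */

    /* Round x up to a multiple of MAP_SIZE, as ebitmap_read() does. */
    static unsigned long round_up_map(unsigned long x)
    {
        x += MAP_SIZE - 1;
        x -= x % MAP_SIZE;
        return x;
    }

    int main(void)
    {
        unsigned long long map = 0;
        unsigned int bit;

        map |= 1ULL << 3;
        map |= 1ULL << 42;

        printf("highbit 100 -> %lu\n", round_up_map(100));  /* 128 */

        /* Flat analogue of ebitmap_for_each_positive_bit(). */
        for (bit = 0; bit < MAP_SIZE; bit++)
            if (map & (1ULL << bit))
                printf("bit %u set\n", bit);
        return 0;
    }
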
e                  40 security/selinux/ss/mls.c 	struct ebitmap *e;
e                  54 security/selinux/ss/mls.c 		e = &context->range.level[l].cat;
e                  55 security/selinux/ss/mls.c 		ebitmap_for_each_positive_bit(e, node, i) {
e                  95 security/selinux/ss/mls.c 	struct ebitmap *e;
e                 114 security/selinux/ss/mls.c 		e = &context->range.level[l].cat;
e                 115 security/selinux/ss/mls.c 		ebitmap_for_each_positive_bit(e, node, i) {
e                 223 security/selinux/ss/policydb.c 	struct constraint_expr *e, *etmp;
e                 232 security/selinux/ss/policydb.c 			e = constraint->expr;
e                 233 security/selinux/ss/policydb.c 			while (e) {
e                 234 security/selinux/ss/policydb.c 				etmp = e;
e                 235 security/selinux/ss/policydb.c 				e = e->next;
e                 245 security/selinux/ss/policydb.c 			e = constraint->expr;
e                 246 security/selinux/ss/policydb.c 			while (e) {
e                 247 security/selinux/ss/policydb.c 				etmp = e;
e                 248 security/selinux/ss/policydb.c 				e = e->next;
e                1189 security/selinux/ss/policydb.c 	struct constraint_expr *e, *le;
e                1213 security/selinux/ss/policydb.c 			e = kzalloc(sizeof(*e), GFP_KERNEL);
e                1214 security/selinux/ss/policydb.c 			if (!e)
e                1218 security/selinux/ss/policydb.c 				le->next = e;
e                1220 security/selinux/ss/policydb.c 				c->expr = e;
e                1225 security/selinux/ss/policydb.c 			e->expr_type = le32_to_cpu(buf[0]);
e                1226 security/selinux/ss/policydb.c 			e->attr = le32_to_cpu(buf[1]);
e                1227 security/selinux/ss/policydb.c 			e->op = le32_to_cpu(buf[2]);
e                1229 security/selinux/ss/policydb.c 			switch (e->expr_type) {
e                1246 security/selinux/ss/policydb.c 				if (!allowxtarget && (e->attr & CEXPR_XTARGET))
e                1251 security/selinux/ss/policydb.c 				rc = ebitmap_read(&e->names, fp);
e                1256 security/selinux/ss/policydb.c 						e->type_names = kzalloc(sizeof
e                1257 security/selinux/ss/policydb.c 						(*e->type_names),
e                1259 security/selinux/ss/policydb.c 					if (!e->type_names)
e                1261 security/selinux/ss/policydb.c 					type_set_init(e->type_names);
e                1262 security/selinux/ss/policydb.c 					rc = type_set_read(e->type_names, fp);
e                1270 security/selinux/ss/policydb.c 			le = e;
e                2510 security/selinux/ss/policydb.c 		struct ebitmap *e = &p->type_attr_map_array[i];
e                2513 security/selinux/ss/policydb.c 			rc = ebitmap_read(e, fp);
e                2518 security/selinux/ss/policydb.c 		rc = ebitmap_set_bit(e, i, 1);
e                2810 security/selinux/ss/policydb.c 	struct constraint_expr *e;
e                2817 security/selinux/ss/policydb.c 		for (e = c->expr; e; e = e->next)
e                2824 security/selinux/ss/policydb.c 		for (e = c->expr; e; e = e->next) {
e                2825 security/selinux/ss/policydb.c 			buf[0] = cpu_to_le32(e->expr_type);
e                2826 security/selinux/ss/policydb.c 			buf[1] = cpu_to_le32(e->attr);
e                2827 security/selinux/ss/policydb.c 			buf[2] = cpu_to_le32(e->op);
e                2832 security/selinux/ss/policydb.c 			switch (e->expr_type) {
e                2834 security/selinux/ss/policydb.c 				rc = ebitmap_write(&e->names, fp);
e                2839 security/selinux/ss/policydb.c 					rc = type_set_write(e->type_names, fp);
e                3505 security/selinux/ss/policydb.c 		struct ebitmap *e = &p->type_attr_map_array[i];
e                3507 security/selinux/ss/policydb.c 		rc = ebitmap_write(e, fp);
e                 272 security/selinux/ss/services.c 	struct constraint_expr *e;
e                 276 security/selinux/ss/services.c 	for (e = cexpr; e; e = e->next) {
e                 277 security/selinux/ss/services.c 		switch (e->expr_type) {
e                 295 security/selinux/ss/services.c 			switch (e->attr) {
e                 309 security/selinux/ss/services.c 				switch (e->op) {
e                 353 security/selinux/ss/services.c 			switch (e->op) {
e                 379 security/selinux/ss/services.c 			switch (e->op) {
e                 395 security/selinux/ss/services.c 			if (e->attr & CEXPR_TARGET)
e                 397 security/selinux/ss/services.c 			else if (e->attr & CEXPR_XTARGET) {
e                 404 security/selinux/ss/services.c 			if (e->attr & CEXPR_USER)
e                 406 security/selinux/ss/services.c 			else if (e->attr & CEXPR_ROLE)
e                 408 security/selinux/ss/services.c 			else if (e->attr & CEXPR_TYPE)
e                 415 security/selinux/ss/services.c 			switch (e->op) {
e                 417 security/selinux/ss/services.c 				s[++sp] = ebitmap_get_bit(&e->names, val1 - 1);
e                 420 security/selinux/ss/services.c 				s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1);
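
The constraint_expr_eval() fragments above walk a linked postfix expression, pushing leaf results with s[++sp] and combining them for the boolean operators. A minimal sketch of that stack evaluator, with toy LEAF/NOT/AND/OR node types standing in for the CEXPR_* constants:

    #include <stdio.h>

    enum { LEAF, NOT, AND, OR };    /* toy stand-ins for CEXPR_* types */

    struct expr {
        int type;
        int value;                  /* used by LEAF only */
        struct expr *next;          /* postfix order, like constraint->expr */
    };

    /* Evaluate a postfix chain with an explicit stack, as in services.c. */
    static int expr_eval(struct expr *cexpr)
    {
        int s[32];
        int sp = -1;
        struct expr *e;

        for (e = cexpr; e; e = e->next) {
            switch (e->type) {
            case LEAF:
                s[++sp] = e->value;
                break;
            case NOT:
                s[sp] = !s[sp];
                break;
            case AND:
                sp--;
                s[sp] = s[sp] && s[sp + 1];
                break;
            case OR:
                sp--;
                s[sp] = s[sp] || s[sp + 1];
                break;
            }
        }
        return s[0];
    }

    int main(void)
    {
        /* Postfix: 1 0 AND 0 NOT OR  ->  1 */
        struct expr n[6] = {
            { LEAF, 1 }, { LEAF, 0 }, { AND },
            { LEAF, 0 }, { NOT }, { OR },
        };
        int i;

        for (i = 0; i < 5; i++)
            n[i].next = &n[i + 1];
        printf("%d\n", expr_eval(&n[0]));   /* prints 1 */
        return 0;
    }
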
e                 864 security/tomoyo/common.c 	struct tomoyo_manager e = { };
e                 875 security/tomoyo/common.c 	e.manager = tomoyo_get_name(manager);
e                 876 security/tomoyo/common.c 	if (e.manager) {
e                 877 security/tomoyo/common.c 		error = tomoyo_update_policy(&e.head, sizeof(e), &param,
e                 879 security/tomoyo/common.c 		tomoyo_put_name(e.manager);
e                1066 security/tomoyo/common.c 		struct tomoyo_task_acl e = {
e                1071 security/tomoyo/common.c 		if (e.domainname)
e                1072 security/tomoyo/common.c 			error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                1075 security/tomoyo/common.c 		tomoyo_put_name(e.domainname);
e                 455 security/tomoyo/condition.c 					   struct tomoyo_condition *e)
e                 461 security/tomoyo/condition.c 		e->transit = tomoyo_get_domainname(param);
e                 477 security/tomoyo/condition.c 	e->transit = tomoyo_get_name(tomoyo_read_token(param));
e                 479 security/tomoyo/condition.c 	if (e->transit)
e                 503 security/tomoyo/condition.c 	struct tomoyo_condition e = { };
e                 505 security/tomoyo/condition.c 		tomoyo_get_transit_preference(param, &e);
e                 566 security/tomoyo/condition.c 				e.argc++;
e                 567 security/tomoyo/condition.c 				e.condc++;
e                 569 security/tomoyo/condition.c 				e.argc--;
e                 570 security/tomoyo/condition.c 				e.condc--;
e                 581 security/tomoyo/condition.c 				e.envc++;
e                 582 security/tomoyo/condition.c 				e.condc++;
e                 584 security/tomoyo/condition.c 				e.envc--;
e                 585 security/tomoyo/condition.c 				e.condc--;
e                 599 security/tomoyo/condition.c 				e.numbers_count++;
e                 601 security/tomoyo/condition.c 				e.numbers_count--;
e                 611 security/tomoyo/condition.c 			e.condc++;
e                 613 security/tomoyo/condition.c 			e.condc--;
e                 617 security/tomoyo/condition.c 				e.names_count++;
e                 619 security/tomoyo/condition.c 				e.names_count--;
e                 631 security/tomoyo/condition.c 				e.numbers_count++;
e                 633 security/tomoyo/condition.c 				e.numbers_count--;
e                 656 security/tomoyo/condition.c 		__LINE__, e.condc, e.numbers_count, e.names_count, e.argc,
e                 657 security/tomoyo/condition.c 		e.envc);
e                 659 security/tomoyo/condition.c 		BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc |
e                 660 security/tomoyo/condition.c 		       e.condc);
e                 663 security/tomoyo/condition.c 	e.size = sizeof(*entry)
e                 664 security/tomoyo/condition.c 		+ e.condc * sizeof(struct tomoyo_condition_element)
e                 665 security/tomoyo/condition.c 		+ e.numbers_count * sizeof(struct tomoyo_number_union)
e                 666 security/tomoyo/condition.c 		+ e.names_count * sizeof(struct tomoyo_name_union)
e                 667 security/tomoyo/condition.c 		+ e.argc * sizeof(struct tomoyo_argv)
e                 668 security/tomoyo/condition.c 		+ e.envc * sizeof(struct tomoyo_envp);
e                 669 security/tomoyo/condition.c 	entry = kzalloc(e.size, GFP_NOFS);
e                 672 security/tomoyo/condition.c 	*entry = e;
e                 673 security/tomoyo/condition.c 	e.transit = NULL;
e                 675 security/tomoyo/condition.c 	numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
e                 676 security/tomoyo/condition.c 	names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
e                 677 security/tomoyo/condition.c 	argv = (struct tomoyo_argv *) (names_p + e.names_count);
e                 678 security/tomoyo/condition.c 	envp = (struct tomoyo_envp *) (argv + e.argc);
e                 702 security/tomoyo/condition.c 	tomoyo_put_name(e.transit);
e                 245 security/tomoyo/domain.c 	struct tomoyo_transition_control e = { .type = type };
e                 261 security/tomoyo/domain.c 		e.program = tomoyo_get_name(program);
e                 262 security/tomoyo/domain.c 		if (!e.program)
e                 269 security/tomoyo/domain.c 			e.is_last_name = true;
e                 271 security/tomoyo/domain.c 		e.domainname = tomoyo_get_name(domainname);
e                 272 security/tomoyo/domain.c 		if (!e.domainname)
e                 276 security/tomoyo/domain.c 	error = tomoyo_update_policy(&e.head, sizeof(e), param,
e                 279 security/tomoyo/domain.c 	tomoyo_put_name(e.domainname);
e                 280 security/tomoyo/domain.c 	tomoyo_put_name(e.program);
e                 405 security/tomoyo/domain.c 	struct tomoyo_aggregator e = { };
e                 413 security/tomoyo/domain.c 	e.original_name = tomoyo_get_name(original_name);
e                 414 security/tomoyo/domain.c 	e.aggregated_name = tomoyo_get_name(aggregated_name);
e                 415 security/tomoyo/domain.c 	if (!e.original_name || !e.aggregated_name ||
e                 416 security/tomoyo/domain.c 	    e.aggregated_name->is_patterned) /* No patterns allowed. */
e                 419 security/tomoyo/domain.c 	error = tomoyo_update_policy(&e.head, sizeof(e), param,
e                 422 security/tomoyo/domain.c 	tomoyo_put_name(e.original_name);
e                 423 security/tomoyo/domain.c 	tomoyo_put_name(e.aggregated_name);
e                 527 security/tomoyo/domain.c 	struct tomoyo_domain_info e = { };
e                 557 security/tomoyo/domain.c 	e.ns = tomoyo_assign_namespace(domainname);
e                 558 security/tomoyo/domain.c 	if (!e.ns)
e                 568 security/tomoyo/domain.c 		e.profile = domain->profile;
e                 569 security/tomoyo/domain.c 		memcpy(e.group, domain->group, sizeof(e.group));
e                 571 security/tomoyo/domain.c 	e.domainname = tomoyo_get_name(domainname);
e                 572 security/tomoyo/domain.c 	if (!e.domainname)
e                 578 security/tomoyo/domain.c 		entry = tomoyo_commit_ok(&e, sizeof(e));
e                 587 security/tomoyo/domain.c 	tomoyo_put_name(e.domainname);
e                  96 security/tomoyo/environ.c 	struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
e                 102 security/tomoyo/environ.c 	e.env = tomoyo_get_name(data);
e                 103 security/tomoyo/environ.c 	if (!e.env)
e                 105 security/tomoyo/environ.c 	error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 107 security/tomoyo/environ.c 	tomoyo_put_name(e.env);
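
Nearly every TOMOYO helper above follows one ownership shape: zero-initialize a policy entry e on the stack, take reference-counted members via tomoyo_get_name(), hand &e.head to a generic update routine that copies the entry into the policy list, then drop the local references on every exit path. A minimal sketch of that shape, with toy refcounting standing in for tomoyo_get_name()/tomoyo_put_name() (all names below are illustrative, not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct name {                   /* toy stand-in for struct tomoyo_path_info */
        int refcnt;
        const char *str;
    };

    struct env_acl {                /* toy stand-in for struct tomoyo_env_acl */
        struct name *env;
    };

    /* Stand-ins for tomoyo_get_name()/tomoyo_put_name() refcounting. */
    static struct name *get_name(const char *s)
    {
        struct name *n = calloc(1, sizeof(*n));

        if (n) {
            n->refcnt = 1;
            n->str = s;
        }
        return n;
    }

    static void put_name(struct name *n)
    {
        if (n && !--n->refcnt)
            free(n);
    }

    /* Stand-in for tomoyo_update_domain(): the stored copy takes a ref. */
    static int update_policy(struct env_acl *e)
    {
        e->env->refcnt++;
        printf("stored acl for %s\n", e->env->str);
        return 0;
    }

    int main(void)
    {
        struct env_acl e = { };     /* entry built on the stack, as above */
        int error = -1;

        e.env = get_name("PATH=/usr/bin");
        if (e.env)
            error = update_policy(&e);
        put_name(e.env);            /* drop the local ref on every path */
        return error ? 1 : 0;
    }
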
e                 389 security/tomoyo/file.c 	struct tomoyo_path_acl e = {
e                 395 security/tomoyo/file.c 	if (!tomoyo_parse_name_union(param, &e.name))
e                 398 security/tomoyo/file.c 		error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 401 security/tomoyo/file.c 	tomoyo_put_name_union(&e.name);
e                 465 security/tomoyo/file.c 	struct tomoyo_mkdev_acl e = {
e                 471 security/tomoyo/file.c 	if (!tomoyo_parse_name_union(param, &e.name) ||
e                 472 security/tomoyo/file.c 	    !tomoyo_parse_number_union(param, &e.mode) ||
e                 473 security/tomoyo/file.c 	    !tomoyo_parse_number_union(param, &e.major) ||
e                 474 security/tomoyo/file.c 	    !tomoyo_parse_number_union(param, &e.minor))
e                 477 security/tomoyo/file.c 		error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 480 security/tomoyo/file.c 	tomoyo_put_name_union(&e.name);
e                 481 security/tomoyo/file.c 	tomoyo_put_number_union(&e.mode);
e                 482 security/tomoyo/file.c 	tomoyo_put_number_union(&e.major);
e                 483 security/tomoyo/file.c 	tomoyo_put_number_union(&e.minor);
e                 544 security/tomoyo/file.c 	struct tomoyo_path2_acl e = {
e                 550 security/tomoyo/file.c 	if (!tomoyo_parse_name_union(param, &e.name1) ||
e                 551 security/tomoyo/file.c 	    !tomoyo_parse_name_union(param, &e.name2))
e                 554 security/tomoyo/file.c 		error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 557 security/tomoyo/file.c 	tomoyo_put_name_union(&e.name1);
e                 558 security/tomoyo/file.c 	tomoyo_put_name_union(&e.name2);
e                 681 security/tomoyo/file.c 	struct tomoyo_path_number_acl e = {
e                 687 security/tomoyo/file.c 	if (!tomoyo_parse_name_union(param, &e.name) ||
e                 688 security/tomoyo/file.c 	    !tomoyo_parse_number_union(param, &e.number))
e                 691 security/tomoyo/file.c 		error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 694 security/tomoyo/file.c 	tomoyo_put_name_union(&e.name);
e                 695 security/tomoyo/file.c 	tomoyo_put_number_union(&e.number);
e                 985 security/tomoyo/file.c 	struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
e                 988 security/tomoyo/file.c 	if (!tomoyo_parse_name_union(param, &e.dev_name) ||
e                 989 security/tomoyo/file.c 	    !tomoyo_parse_name_union(param, &e.dir_name) ||
e                 990 security/tomoyo/file.c 	    !tomoyo_parse_name_union(param, &e.fs_type) ||
e                 991 security/tomoyo/file.c 	    !tomoyo_parse_number_union(param, &e.flags))
e                 994 security/tomoyo/file.c 		error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 996 security/tomoyo/file.c 	tomoyo_put_name_union(&e.dev_name);
e                 997 security/tomoyo/file.c 	tomoyo_put_name_union(&e.dir_name);
e                 998 security/tomoyo/file.c 	tomoyo_put_name_union(&e.fs_type);
e                 999 security/tomoyo/file.c 	tomoyo_put_number_union(&e.flags);
e                  83 security/tomoyo/group.c 		struct tomoyo_path_group e = { };
e                  85 security/tomoyo/group.c 		e.member_name = tomoyo_get_name(tomoyo_read_token(param));
e                  86 security/tomoyo/group.c 		if (!e.member_name) {
e                  90 security/tomoyo/group.c 		error = tomoyo_update_policy(&e.head, sizeof(e), param,
e                  92 security/tomoyo/group.c 		tomoyo_put_name(e.member_name);
e                  94 security/tomoyo/group.c 		struct tomoyo_number_group e = { };
e                  97 security/tomoyo/group.c 		    !tomoyo_parse_number_union(param, &e.number))
e                  99 security/tomoyo/group.c 		error = tomoyo_update_policy(&e.head, sizeof(e), param,
e                 106 security/tomoyo/group.c 		struct tomoyo_address_group e = { };
e                 109 security/tomoyo/group.c 		    !tomoyo_parse_ipaddr_union(param, &e.address))
e                 111 security/tomoyo/group.c 		error = tomoyo_update_policy(&e.head, sizeof(e), param,
e                  98 security/tomoyo/memory.c 	struct tomoyo_group e = { };
e                 106 security/tomoyo/memory.c 	e.group_name = tomoyo_get_name(group_name);
e                 107 security/tomoyo/memory.c 	if (!e.group_name)
e                 113 security/tomoyo/memory.c 		if (e.group_name != group->group_name ||
e                 121 security/tomoyo/memory.c 		struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
e                 133 security/tomoyo/memory.c 	tomoyo_put_name(e.group_name);
e                 284 security/tomoyo/network.c 	struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
e                 290 security/tomoyo/network.c 	for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
e                 291 security/tomoyo/network.c 		if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
e                 295 security/tomoyo/network.c 			e.perm |= 1 << type;
e                 296 security/tomoyo/network.c 	if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
e                 300 security/tomoyo/network.c 		e.address.group =
e                 302 security/tomoyo/network.c 		if (!e.address.group)
e                 305 security/tomoyo/network.c 		if (!tomoyo_parse_ipaddr_union(param, &e.address))
e                 308 security/tomoyo/network.c 	if (!tomoyo_parse_number_union(param, &e.port) ||
e                 309 security/tomoyo/network.c 	    e.port.values[1] > 65535)
e                 311 security/tomoyo/network.c 	error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 315 security/tomoyo/network.c 	tomoyo_put_group(e.address.group);
e                 316 security/tomoyo/network.c 	tomoyo_put_number_union(&e.port);
e                 329 security/tomoyo/network.c 	struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
e                 335 security/tomoyo/network.c 	for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
e                 336 security/tomoyo/network.c 		if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
e                 340 security/tomoyo/network.c 			e.perm |= 1 << type;
e                 341 security/tomoyo/network.c 	if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
e                 343 security/tomoyo/network.c 	if (!tomoyo_parse_name_union(param, &e.name))
e                 345 security/tomoyo/network.c 	error = tomoyo_update_domain(&e.head, sizeof(e), param,
e                 348 security/tomoyo/network.c 	tomoyo_put_name_union(&e.name);
e                 439 security/tomoyo/util.c 	unsigned char e;
e                 482 security/tomoyo/util.c 				e = *string++;
e                 483 security/tomoyo/util.c 				if (d < '0' || d > '7' || e < '0' || e > '7')
e                 485 security/tomoyo/util.c 				c = tomoyo_make_byte(c, d, e);
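
The util.c fragment above decodes a three-digit octal escape: after a backslash, digits c, d and e are range-checked and folded into one byte by tomoyo_make_byte(). A minimal sketch of that decode; make_byte() mirrors the shift-and-add fold but is an illustrative reimplementation, not the kernel helper:

    #include <stdio.h>

    /* Fold three octal digits into one byte, like tomoyo_make_byte(). */
    static unsigned char make_byte(unsigned char c, unsigned char d,
                                   unsigned char e)
    {
        return ((c - '0') << 6) + ((d - '0') << 3) + (e - '0');
    }

    int main(void)
    {
        const char *s = "134";      /* the escape \134 is '\\' */
        unsigned char c = s[0], d = s[1], e = s[2];

        /* Reject malformed escapes, as the util.c checks do. */
        if (c < '0' || c > '3' || d < '0' || d > '7' || e < '0' || e > '7')
            return 1;
        printf("decoded: %c\n", make_byte(c, d, e));    /* prints \ */
        return 0;
    }
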
e                 125 sound/core/seq/oss/seq_oss_event.c 	switch (q->e.cmd) {
e                 127 sound/core/seq/oss/seq_oss_event.c 		return note_off_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev);
e                 130 sound/core/seq/oss/seq_oss_event.c 		return note_on_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev);
e                 133 sound/core/seq/oss/seq_oss_event.c 		return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_PGMCHANGE,
e                 134 sound/core/seq/oss/seq_oss_event.c 					 q->e.chn, 0, q->e.p1, ev);
e                 137 sound/core/seq/oss/seq_oss_event.c 		return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CHANPRESS,
e                 138 sound/core/seq/oss/seq_oss_event.c 					 q->e.chn, 0, q->e.p1, ev);
e                 142 sound/core/seq/oss/seq_oss_event.c 		val = (char)q->e.p1;
e                 144 sound/core/seq/oss/seq_oss_event.c 		return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CONTROLLER,
e                 145 sound/core/seq/oss/seq_oss_event.c 					 q->e.chn, CTL_PAN, val, ev);
e                 148 sound/core/seq/oss/seq_oss_event.c 		val = ((short)q->e.p3 << 8) | (short)q->e.p2;
e                 149 sound/core/seq/oss/seq_oss_event.c 		switch (q->e.p1) {
e                 152 sound/core/seq/oss/seq_oss_event.c 			return set_control_event(dp, q->e.dev,
e                 154 sound/core/seq/oss/seq_oss_event.c 						 q->e.chn, 0, val, ev);
e                 157 sound/core/seq/oss/seq_oss_event.c 			return set_control_event(dp, q->e.dev,
e                 159 sound/core/seq/oss/seq_oss_event.c 						 q->e.chn, 0, val*128/100, ev);
e                 161 sound/core/seq/oss/seq_oss_event.c 			return set_control_event(dp, q->e.dev,
e                 163 sound/core/seq/oss/seq_oss_event.c 						  q->e.chn, q->e.p1, val, ev);
e                 167 sound/core/seq/oss/seq_oss_event.c 		return snd_seq_oss_synth_raw_event(dp, q->e.dev, q->c, ev);
e                  85 sound/core/seq/oss/seq_oss_event.h 	struct evrec_extended e;
e                 160 sound/firewire/bebob/bebob_proc.c 	 void (*op)(struct snd_info_entry *e, struct snd_info_buffer *b))
e                 716 sound/firewire/bebob/bebob_stream.c 	unsigned int i, e, channels, format;
e                 737 sound/firewire/bebob/bebob_stream.c 	for (e = 0; e < buf[4]; e++) {
e                 738 sound/firewire/bebob/bebob_stream.c 		channels = buf[5 + e * 2];
e                 739 sound/firewire/bebob/bebob_stream.c 		format = buf[6 + e * 2];
e                  37 sound/firewire/fireface/ff-proc.c 		     void (*op)(struct snd_info_entry *e,
e                 196 sound/firewire/fireworks/fireworks_proc.c 	 void (*op)(struct snd_info_entry *e, struct snd_info_buffer *b))
e                  83 sound/firewire/motu/motu-proc.c 		     void (*op)(struct snd_info_entry *e,
e                  79 sound/firewire/oxfw/oxfw-proc.c 		     void (*op)(struct snd_info_entry *e,
e                 495 sound/firewire/oxfw/oxfw-stream.c 	unsigned int i, e, channels, type;
e                 517 sound/firewire/oxfw/oxfw-stream.c 	for (e = 0; e < format[4]; e++) {
e                 518 sound/firewire/oxfw/oxfw-stream.c 		channels = format[5 + e * 2];
e                 519 sound/firewire/oxfw/oxfw-stream.c 		type = format[6 + e * 2];
e                  54 sound/firewire/tascam/tascam-proc.c 		     void (*op)(struct snd_info_entry *e,
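
The bebob/oxfw stream fragments above parse a count-prefixed list out of a format response: buf[4] (or format[4]) holds the entry count, and each entry is a (channels, format-type) byte pair at offsets 5 + e*2 and 6 + e*2. A minimal sketch of that walk over a made-up response buffer (the byte values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        /* Toy response: buf[4] = #entries, then (channels, format) pairs. */
        const unsigned char buf[] = { 0, 0, 0, 0, 2, 6, 0x06, 2, 0x0d };
        unsigned int e, channels, format;

        for (e = 0; e < buf[4]; e++) {
            channels = buf[5 + e * 2];
            format   = buf[6 + e * 2];
            printf("entry %u: %u channels, format 0x%02x\n",
                   e, channels, format);
        }
        return 0;
    }
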
e                  17 sound/isa/gus/gus_volume.c 	unsigned short e, m, tmp;
e                  22 sound/isa/gus/gus_volume.c 	e = 7;
e                  24 sound/isa/gus/gus_volume.c 		while (e > 0 && tmp < (1 << e))
e                  25 sound/isa/gus/gus_volume.c 			e--;
e                  29 sound/isa/gus/gus_volume.c 			e++;
e                  32 sound/isa/gus/gus_volume.c 	m = vol - (1 << e);
e                  34 sound/isa/gus/gus_volume.c 		if (e > 8)
e                  35 sound/isa/gus/gus_volume.c 			m >>= e - 8;
e                  36 sound/isa/gus/gus_volume.c 		else if (e < 8)
e                  37 sound/isa/gus/gus_volume.c 			m <<= 8 - e;
e                  40 sound/isa/gus/gus_volume.c 	return (e << 8) | m;
e                  48 sound/isa/gus/gus_volume.c 	unsigned short e, m;
e                  52 sound/isa/gus/gus_volume.c 	e = gf1_vol >> 8;
e                  54 sound/isa/gus/gus_volume.c 	rvol = 1 << e;
e                  55 sound/isa/gus/gus_volume.c 	if (e > 8)
e                  56 sound/isa/gus/gus_volume.c 		return rvol | (m << (e - 8));
e                  57 sound/isa/gus/gus_volume.c 	return rvol | (m >> (8 - e));
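
The gus_volume fragments above convert between a linear volume and the GF1 hardware format: an exponent in the high byte and an 8-bit mantissa in the low byte, combined as (e << 8) | m. A minimal round-trip sketch following the same shifts (a simplified version, not byte-exact with the driver, which also consults lookup tables):

    #include <stdio.h>

    /* Encode a linear volume as (exponent << 8) | mantissa, GF1-style. */
    static unsigned short to_gf1(unsigned short vol)
    {
        unsigned short e = 7, m;

        if (!vol)
            return 0;
        while (e > 0 && vol < (1u << e))
            e--;
        while (vol >= (1u << (e + 1)))
            e++;
        m = vol - (1u << e);
        if (e > 8)
            m >>= e - 8;            /* squeeze the mantissa into 8 bits */
        else if (e < 8)
            m <<= 8 - e;            /* stretch it to 8 bits */
        return (e << 8) | m;
    }

    /* Decode back to an approximate linear volume. */
    static unsigned int from_gf1(unsigned short gf1)
    {
        unsigned short e = gf1 >> 8, m = gf1 & 0xff;
        unsigned int rvol = 1u << e;

        if (e > 8)
            return rvol | (m << (e - 8));
        return rvol | (m >> (8 - e));
    }

    int main(void)
    {
        unsigned short v = 1000;
        unsigned short g = to_gf1(v);

        printf("%u -> 0x%04x -> %u\n", v, g, from_gf1(g));
        return 0;
    }
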
e                 450 sound/pci/ac97/ac97_codec.c 	struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value;
e                 452 sound/pci/ac97/ac97_codec.c 	return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2,
e                 453 sound/pci/ac97/ac97_codec.c 				 e->mask, e->texts);
e                 460 sound/pci/ac97/ac97_codec.c 	struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value;
e                 463 sound/pci/ac97/ac97_codec.c 	for (bitmask = 1; bitmask < e->mask; bitmask <<= 1)
e                 465 sound/pci/ac97/ac97_codec.c 	val = snd_ac97_read_cache(ac97, e->reg);
e                 466 sound/pci/ac97/ac97_codec.c 	ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & (bitmask - 1);
e                 467 sound/pci/ac97/ac97_codec.c 	if (e->shift_l != e->shift_r)
e                 468 sound/pci/ac97/ac97_codec.c 		ucontrol->value.enumerated.item[1] = (val >> e->shift_r) & (bitmask - 1);
e                 477 sound/pci/ac97/ac97_codec.c 	struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value;
e                 481 sound/pci/ac97/ac97_codec.c 	for (bitmask = 1; bitmask < e->mask; bitmask <<= 1)
e                 483 sound/pci/ac97/ac97_codec.c 	if (ucontrol->value.enumerated.item[0] > e->mask - 1)
e                 485 sound/pci/ac97/ac97_codec.c 	val = ucontrol->value.enumerated.item[0] << e->shift_l;
e                 486 sound/pci/ac97/ac97_codec.c 	mask = (bitmask - 1) << e->shift_l;
e                 487 sound/pci/ac97/ac97_codec.c 	if (e->shift_l != e->shift_r) {
e                 488 sound/pci/ac97/ac97_codec.c 		if (ucontrol->value.enumerated.item[1] > e->mask - 1)
e                 490 sound/pci/ac97/ac97_codec.c 		val |= ucontrol->value.enumerated.item[1] << e->shift_r;
e                 491 sound/pci/ac97/ac97_codec.c 		mask |= (bitmask - 1) << e->shift_r;
e                 493 sound/pci/ac97/ac97_codec.c 	return snd_ac97_update_bits(ac97, e->reg, mask, val);
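
The ac97_codec.c fragments above pack the same enum twice into one register, once per channel at shift_l and shift_r; put() first finds the smallest power of two covering the item count, then builds a value/mask pair for a read-modify-write update. A minimal sketch of that double-shift update against a fake register (struct and names simplified from struct ac97_enum):

    #include <stdio.h>

    /* One register holds the same enum twice: left and right channel. */
    struct enum_ctl {
        unsigned int shift_l, shift_r;
        unsigned int mask;          /* number of items, as in struct ac97_enum */
    };

    static unsigned int reg;        /* fake codec register backing store */

    static void enum_put(const struct enum_ctl *e, unsigned int l, unsigned int r)
    {
        unsigned int bitmask, val, mask;

        /* Smallest power of two covering all items, as in ac97_codec.c. */
        for (bitmask = 1; bitmask < e->mask; bitmask <<= 1)
            ;
        val  = l << e->shift_l;
        mask = (bitmask - 1) << e->shift_l;
        if (e->shift_l != e->shift_r) {
            val  |= r << e->shift_r;
            mask |= (bitmask - 1) << e->shift_r;
        }
        reg = (reg & ~mask) | val;  /* read-modify-write update */
    }

    int main(void)
    {
        struct enum_ctl e = { .shift_l = 0, .shift_r = 4, .mask = 3 };

        enum_put(&e, 2, 1);
        printf("reg = 0x%02x\n", reg);      /* 0x12 */
        return 0;
    }
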
e                 231 sound/pci/asihpi/asihpi.c 	u16 e;
e                 233 sound/pci/asihpi/asihpi.c 		e = hpi_outstream_get_info_ex(h_stream, pw_state,
e                 237 sound/pci/asihpi/asihpi.c 		e = hpi_instream_get_info_ex(h_stream, pw_state,
e                 240 sound/pci/asihpi/asihpi.c 	return e;
e                 587 sound/pci/asihpi/asihpi.c 	u16 e;
e                 626 sound/pci/asihpi/asihpi.c 				e = hpi_stream_group_add(
e                 629 sound/pci/asihpi/asihpi.c 				if (!e) {
e                 632 sound/pci/asihpi/asihpi.c 					hpi_handle_error(e);
e                2244 sound/pci/asihpi/asihpi.c 	u16 e;
e                2248 sound/pci/asihpi/asihpi.c 	e = hpi_multiplexer_query_source(h_control,
e                2251 sound/pci/asihpi/asihpi.c 	if (!e)
e                2460 sound/pci/asihpi/asihpi.c 	u16 e;
e                2462 sound/pci/asihpi/asihpi.c 	e = hpi_sample_clock_get_local_rate(h_control, &rate);
e                2463 sound/pci/asihpi/asihpi.c 	if (!e)
e                2502 sound/pci/asihpi/asihpi.c 	u16 e;
e                2504 sound/pci/asihpi/asihpi.c 	e = hpi_sample_clock_get_sample_rate(h_control, &rate);
e                2505 sound/pci/asihpi/asihpi.c 	if (!e)
e                2830 sound/pci/asihpi/hpifunc.c 	u16 e;
e                2832 sound/pci/asihpi/hpifunc.c 	e = hpi_control_query(h_volume, HPI_VOLUME_AUTOFADE, i, 0, &u);
e                2834 sound/pci/asihpi/hpifunc.c 	return e;
e                  48 sound/pci/au88x0/au88x0_a3d.c 		      short e)
e                  53 sound/pci/au88x0/au88x0_a3d.c 		(e << 0x10) | d);
e                  63 sound/pci/au88x0/au88x0_a3d.c 		       short e)
e                  68 sound/pci/au88x0/au88x0_a3d.c 		(e << 0x10) | d);
e                  89 sound/pci/au88x0/au88x0_a3d.c 		      short *d, short *e)
e                 308 sound/pci/ctxfi/ctmixer.c 	unsigned int e;
e                 313 sound/pci/ctxfi/ctmixer.c 	e = (x >> 10) & 0x7;
e                 316 sound/pci/ctxfi/ctmixer.c 	x >>= (7 - e);
e                 224 sound/pci/hda/hda_eld.c int snd_hdmi_parse_eld(struct hda_codec *codec, struct parsed_hdmi_eld *e,
e                 230 sound/pci/hda/hda_eld.c 	memset(e, 0, sizeof(*e));
e                 231 sound/pci/hda/hda_eld.c 	e->eld_ver = GRAB_BITS(buf, 0, 3, 5);
e                 232 sound/pci/hda/hda_eld.c 	if (e->eld_ver != ELD_VER_CEA_861D &&
e                 233 sound/pci/hda/hda_eld.c 	    e->eld_ver != ELD_VER_PARTIAL) {
e                 234 sound/pci/hda/hda_eld.c 		codec_info(codec, "HDMI: Unknown ELD version %d\n", e->eld_ver);
e                 238 sound/pci/hda/hda_eld.c 	e->baseline_len = GRAB_BITS(buf, 2, 0, 8);
e                 240 sound/pci/hda/hda_eld.c 	e->cea_edid_ver	= GRAB_BITS(buf, 4, 5, 3);
e                 242 sound/pci/hda/hda_eld.c 	e->support_hdcp	= GRAB_BITS(buf, 5, 0, 1);
e                 243 sound/pci/hda/hda_eld.c 	e->support_ai	= GRAB_BITS(buf, 5, 1, 1);
e                 244 sound/pci/hda/hda_eld.c 	e->conn_type	= GRAB_BITS(buf, 5, 2, 2);
e                 245 sound/pci/hda/hda_eld.c 	e->sad_count	= GRAB_BITS(buf, 5, 4, 4);
e                 247 sound/pci/hda/hda_eld.c 	e->aud_synch_delay = GRAB_BITS(buf, 6, 0, 8) * 2;
e                 248 sound/pci/hda/hda_eld.c 	e->spk_alloc	= GRAB_BITS(buf, 7, 0, 7);
e                 250 sound/pci/hda/hda_eld.c 	e->port_id	  = get_unaligned_le64(buf + 8);
e                 253 sound/pci/hda/hda_eld.c 	e->manufacture_id = get_unaligned_le16(buf + 16);
e                 254 sound/pci/hda/hda_eld.c 	e->product_id	  = get_unaligned_le16(buf + 18);
e                 263 sound/pci/hda/hda_eld.c 		strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl + 1);
e                 265 sound/pci/hda/hda_eld.c 	for (i = 0; i < e->sad_count; i++) {
e                 270 sound/pci/hda/hda_eld.c 		hdmi_update_short_audio_desc(codec, e->sad + i,
e                 279 sound/pci/hda/hda_eld.c 	if (!e->spk_alloc)
e                 280 sound/pci/hda/hda_eld.c 		e->spk_alloc = 0xffff;
e                 396 sound/pci/hda/hda_eld.c void snd_hdmi_show_eld(struct hda_codec *codec, struct parsed_hdmi_eld *e)
e                 401 sound/pci/hda/hda_eld.c 			e->monitor_name,
e                 402 sound/pci/hda/hda_eld.c 			eld_connection_type_names[e->conn_type]);
e                 404 sound/pci/hda/hda_eld.c 	if (e->spk_alloc) {
e                 406 sound/pci/hda/hda_eld.c 		snd_hdac_print_channel_allocation(e->spk_alloc, buf, sizeof(buf));
e                 410 sound/pci/hda/hda_eld.c 	for (i = 0; i < e->sad_count; i++)
e                 411 sound/pci/hda/hda_eld.c 		hdmi_show_short_audio_desc(codec, e->sad + i);
e                 445 sound/pci/hda/hda_eld.c 	struct parsed_hdmi_eld *e = &eld->info;
e                 467 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
e                 469 sound/pci/hda/hda_eld.c 				eld_connection_type_names[e->conn_type]);
e                 470 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "eld_version\t\t[0x%x] %s\n", e->eld_ver,
e                 471 sound/pci/hda/hda_eld.c 					eld_version_names[e->eld_ver]);
e                 472 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "edid_version\t\t[0x%x] %s\n", e->cea_edid_ver,
e                 473 sound/pci/hda/hda_eld.c 				cea_edid_version_names[e->cea_edid_ver]);
e                 474 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "manufacture_id\t\t0x%x\n", e->manufacture_id);
e                 475 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "product_id\t\t0x%x\n", e->product_id);
e                 476 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "port_id\t\t\t0x%llx\n", (long long)e->port_id);
e                 477 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "support_hdcp\t\t%d\n", e->support_hdcp);
e                 478 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "support_ai\t\t%d\n", e->support_ai);
e                 479 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "audio_sync_delay\t%d\n", e->aud_synch_delay);
e                 481 sound/pci/hda/hda_eld.c 	snd_hdac_print_channel_allocation(e->spk_alloc, buf, sizeof(buf));
e                 482 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "speakers\t\t[0x%x]%s\n", e->spk_alloc, buf);
e                 484 sound/pci/hda/hda_eld.c 	snd_iprintf(buffer, "sad_count\t\t%d\n", e->sad_count);
e                 486 sound/pci/hda/hda_eld.c 	for (i = 0; i < e->sad_count; i++)
e                 487 sound/pci/hda/hda_eld.c 		hdmi_print_sad_info(i, e->sad + i, buffer);
e                 493 sound/pci/hda/hda_eld.c 	struct parsed_hdmi_eld *e = &eld->info;
e                 513 sound/pci/hda/hda_eld.c 			e->conn_type = val;
e                 515 sound/pci/hda/hda_eld.c 			e->port_id = val;
e                 517 sound/pci/hda/hda_eld.c 			e->support_hdcp = val;
e                 519 sound/pci/hda/hda_eld.c 			e->support_ai = val;
e                 521 sound/pci/hda/hda_eld.c 			e->aud_synch_delay = val;
e                 523 sound/pci/hda/hda_eld.c 			e->spk_alloc = val;
e                 525 sound/pci/hda/hda_eld.c 			e->sad_count = val;
e                 536 sound/pci/hda/hda_eld.c 				e->sad[n].format = val;
e                 538 sound/pci/hda/hda_eld.c 				e->sad[n].channels = val;
e                 540 sound/pci/hda/hda_eld.c 				e->sad[n].rates = val;
e                 542 sound/pci/hda/hda_eld.c 				e->sad[n].sample_bits = val;
e                 544 sound/pci/hda/hda_eld.c 				e->sad[n].max_bitrate = val;
e                 546 sound/pci/hda/hda_eld.c 				e->sad[n].profile = val;
e                 547 sound/pci/hda/hda_eld.c 			if (n >= e->sad_count)
e                 548 sound/pci/hda/hda_eld.c 				e->sad_count = n + 1;
e                 555 sound/pci/hda/hda_eld.c void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
e                 572 sound/pci/hda/hda_eld.c 	for (i = 0; i < e->sad_count; i++) {
e                 573 sound/pci/hda/hda_eld.c 		struct cea_sad *a = &e->sad[i];
e                 700 sound/pci/hda/hda_local.h int snd_hdmi_parse_eld(struct hda_codec *codec, struct parsed_hdmi_eld *e,
e                 702 sound/pci/hda/hda_local.h void snd_hdmi_show_eld(struct hda_codec *codec, struct parsed_hdmi_eld *e);
e                 703 sound/pci/hda/hda_local.h void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
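
The ELD parser above slices a byte buffer into fields with GRAB_BITS(buf, byte, lowbit, bits); for example eld_ver is the 5-bit field starting at bit 3 of byte 0. A minimal sketch of such an extractor, with the argument order inferred from the call sites above:

    #include <stdio.h>

    /* Extract `bits` bits of buf[byte], starting at `lowbit` (LSB = 0). */
    #define GRAB_BITS(buf, byte, lowbit, bits) \
        (((buf)[byte] >> (lowbit)) & ((1 << (bits)) - 1))

    int main(void)
    {
        unsigned char eld[4] = { 0x10, 0x00, 0x14, 0x00 };

        /* eld_ver: bits 3..7 of byte 0 (here: 2, i.e. CEA-861D). */
        printf("eld_ver      = %d\n", GRAB_BITS(eld, 0, 3, 5));
        /* baseline_len: all 8 bits of byte 2 (here: 20). */
        printf("baseline_len = %d\n", GRAB_BITS(eld, 2, 0, 8));
        return 0;
    }
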
e                 990 sound/pci/oxygen/xonar_wm87x6.c #define WM8776_FIELD_CTL_VOLUME(a, b, c, d, e, f, g, h, tlv_p) { \
e                 991 sound/pci/oxygen/xonar_wm87x6.c 	_WM8776_FIELD_CTL(a " Capture Volume", b, c, d, e, f, g, h), \
e                 244 sound/pci/riptide/riptide.c #define SEND_SETF(p,b,c,d,e,f,g)   sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0))	/* set sample format at mixer */
e                 251 sound/pci/riptide/riptide.c #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0))	/* select paths for internal connections */
e                 252 sound/pci/riptide/riptide.c #define SEND_SSRC(p,b,c,d,e)       sendcmd(p,PARM,SSRC|BYTE1(b)|WORD2(c),WORD0(d)|WORD2(e),RET(0))	/* configure source */
e                 256 sound/pci/riptide/riptide.c #define SEND_SDGV(p,b,c,d,e)       sendcmd(p,PARM,SDGV|BYTE2(b)|BYTE3(c),WORD0(d)|WORD2(e),RET(0))	/* set digital mixer */
e                 262 sound/pci/riptide/riptide.c #define SEND_TXAC(p,b,c,d,e,f)     sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0))
e                  78 sound/soc/atmel/tse850-pcm5142.c 	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
e                  81 sound/soc/atmel/tse850-pcm5142.c 	if (val >= e->items)
e                 108 sound/soc/atmel/tse850-pcm5142.c 	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
e                 111 sound/soc/atmel/tse850-pcm5142.c 	if (val >= e->items)
e                 193 sound/soc/atmel/tse850-pcm5142.c 	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
e                 197 sound/soc/atmel/tse850-pcm5142.c 	if (uV >= e->items)
e                 192 sound/soc/codecs/adau17x1.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 194 sound/soc/codecs/adau17x1.c 	unsigned int stream = e->shift_l;
e                 198 sound/soc/codecs/adau17x1.c 	if (ucontrol->value.enumerated.item[0] >= e->items)
e                 225 sound/soc/codecs/adau17x1.c 				ucontrol->value.enumerated.item[0], e, &update);
e                 236 sound/soc/codecs/adau17x1.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 237 sound/soc/codecs/adau17x1.c 	unsigned int stream = e->shift_l;
e                 362 sound/soc/codecs/cpcap.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 363 sound/soc/codecs/cpcap.c 	unsigned int shift = e->shift_l;
e                 407 sound/soc/codecs/cpcap.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 409 sound/soc/codecs/cpcap.c 	unsigned int mask = BIT(e->shift_l);
e                 440 sound/soc/codecs/cpcap.c 	snd_soc_dapm_mux_update_power(dapm, kcontrol, muxval, e, NULL);
e                 491 sound/soc/codecs/cpcap.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 524 sound/soc/codecs/cpcap.c 	snd_soc_dapm_mux_update_power(dapm, kcontrol, muxval, e, NULL);
e                 567 sound/soc/codecs/cpcap.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 592 sound/soc/codecs/cpcap.c 	snd_soc_dapm_mux_update_power(dapm, kcontrol, muxval, e, NULL);
e                1021 sound/soc/codecs/cs43130.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                1027 sound/soc/codecs/cs43130.c 	if (item[0] >= e->items)
e                1029 sound/soc/codecs/cs43130.c 	val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
e                  62 sound/soc/codecs/cs47l92.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                  67 sound/soc/codecs/cs47l92.c 	if (ucontrol->value.enumerated.item[0] > e->items - 1)
e                  74 sound/soc/codecs/cs47l92.c 	ep_sel = mux << e->shift_l;
e                 122 sound/soc/codecs/cs47l92.c 	return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
e                 903 sound/soc/codecs/hdac_hdmi.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 910 sound/soc/codecs/hdac_hdmi.c 	const char *cvt_name =  e->texts[ucontrol->value.enumerated.item[0]];
e                 500 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 505 sound/soc/codecs/madera.c 	if (ucontrol->value.enumerated.item[0] > e->items - 1)
e                 571 sound/soc/codecs/madera.c 	return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
e                 605 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 615 sound/soc/codecs/madera.c 	val = mux << e->shift_l;
e                 616 sound/soc/codecs/madera.c 	mask = (e->mask << e->shift_l) | MADERA_IN1L_SRC_SE_MASK;
e                 618 sound/soc/codecs/madera.c 	switch (e->reg) {
e                 639 sound/soc/codecs/madera.c 		mux, e->reg, inmode, mask, val);
e                 641 sound/soc/codecs/madera.c 	ret = regmap_update_bits_check(regmap, e->reg, mask, val, &changed);
e                 647 sound/soc/codecs/madera.c 						     mux, e, NULL);
e                 826 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 828 sound/soc/codecs/madera.c 	const int adsp_num = e->shift_l;
e                 835 sound/soc/codecs/madera.c 	item = snd_soc_enum_val_to_item(e, cached_rate);
e                 847 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 848 sound/soc/codecs/madera.c 	const int adsp_num = e->shift_l;
e                 852 sound/soc/codecs/madera.c 	if (item >= e->items)
e                 869 sound/soc/codecs/madera.c 		priv->adsp_rate_cache[adsp_num] = e->values[item];
e                1007 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                1012 sound/soc/codecs/madera.c 	if (item >= e->items)
e                1021 sound/soc/codecs/madera.c 	ret = snd_soc_component_read(component, e->reg, &val);
e                1024 sound/soc/codecs/madera.c 			 e->reg, ret);
e                1027 sound/soc/codecs/madera.c 	val >>= e->shift_l;
e                1028 sound/soc/codecs/madera.c 	val &= e->mask;
e                1029 sound/soc/codecs/madera.c 	if (snd_soc_enum_item_to_val(e, item) == val) {
e                1034 sound/soc/codecs/madera.c 	if (!madera_can_change_grp_rate(priv, e->reg)) {
e                2119 sound/soc/codecs/madera.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                2120 sound/soc/codecs/madera.c 	unsigned int reg = e->reg;
e                 327 sound/soc/codecs/twl6040.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 331 sound/soc/codecs/twl6040.c 	val = twl6040_read(component, e->reg);
e                1285 sound/soc/codecs/wcd9335.c 	struct soc_enum *e = (struct soc_enum *)kc->private_value;
e                1317 sound/soc/codecs/wcd9335.c 				      e, update);
e                1476 sound/soc/codecs/wcd9335.c 	struct soc_enum *e = (struct soc_enum *)kc->private_value;
e                1481 sound/soc/codecs/wcd9335.c 	switch (e->reg) {
e                1525 sound/soc/codecs/wcd9335.c 	struct soc_enum *e = (struct soc_enum *)kc->private_value;
e                1532 sound/soc/codecs/wcd9335.c 	if (e->reg == WCD9335_CDC_RX0_RX_PATH_SEC0)
e                1534 sound/soc/codecs/wcd9335.c 	else if (e->reg == WCD9335_CDC_RX1_RX_PATH_SEC0)
e                1536 sound/soc/codecs/wcd9335.c 	else if (e->reg == WCD9335_CDC_RX2_RX_PATH_SEC0)
e                 163 sound/soc/codecs/wm8804.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 164 sound/soc/codecs/wm8804.c 	unsigned int val = ucontrol->value.enumerated.item[0] << e->shift_l;
e                 165 sound/soc/codecs/wm8804.c 	unsigned int mask = 1 << e->shift_l;
e                 173 sound/soc/codecs/wm8804.c 	if (snd_soc_component_test_bits(component, e->reg, mask, val)) {
e                 181 sound/soc/codecs/wm8804.c 		snd_soc_component_update_bits(component, e->reg, mask, val);
e                 108 sound/soc/codecs/wm8998.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 116 sound/soc/codecs/wm8998.c 	switch (e->reg) {
e                 140 sound/soc/codecs/wm8998.c 	snd_soc_component_update_bits(component, e->reg,
e                 147 sound/soc/codecs/wm8998.c 					     e, NULL);
e                 779 sound/soc/codecs/wm_adsp.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 782 sound/soc/codecs/wm_adsp.c 	ucontrol->value.enumerated.item[0] = dsp[e->shift_l].fw;
e                 792 sound/soc/codecs/wm_adsp.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 796 sound/soc/codecs/wm_adsp.c 	if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw)
e                 802 sound/soc/codecs/wm_adsp.c 	mutex_lock(&dsp[e->shift_l].pwr_lock);
e                 804 sound/soc/codecs/wm_adsp.c 	if (dsp[e->shift_l].booted || !list_empty(&dsp[e->shift_l].compr_list))
e                 807 sound/soc/codecs/wm_adsp.c 		dsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
e                 809 sound/soc/codecs/wm_adsp.c 	mutex_unlock(&dsp[e->shift_l].pwr_lock);
e                 116 sound/soc/fsl/fsl_audmix.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 128 sound/soc/fsl/fsl_audmix.c 	val = snd_soc_enum_item_to_val(e, item[0]);
e                 158 sound/soc/fsl/fsl_audmix.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 176 sound/soc/fsl/fsl_audmix.c 	val = snd_soc_enum_item_to_val(e, item[0]);
e                 130 sound/soc/intel/atom/sst-atom-controls.c 	struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
e                 134 sound/soc/intel/atom/sst-atom-controls.c 	uinfo->value.enumerated.items = e->max;
e                 136 sound/soc/intel/atom/sst-atom-controls.c 	if (uinfo->value.enumerated.item > e->max - 1)
e                 137 sound/soc/intel/atom/sst-atom-controls.c 		uinfo->value.enumerated.item = e->max - 1;
e                 139 sound/soc/intel/atom/sst-atom-controls.c 		e->texts[uinfo->value.enumerated.item]);
e                 154 sound/soc/intel/atom/sst-atom-controls.c 	struct sst_enum *e = (void *)kcontrol->private_value;
e                 157 sound/soc/intel/atom/sst-atom-controls.c 	unsigned int ctl_no = e->reg;
e                 158 sound/soc/intel/atom/sst-atom-controls.c 	unsigned int is_tx = e->tx;
e                 165 sound/soc/intel/atom/sst-atom-controls.c 	for (mux = e->max; mux > 0;  mux--)
e                 174 sound/soc/intel/atom/sst-atom-controls.c 			 e->texts[mux], mux ? map[mux - 1] : -1);
e                 185 sound/soc/intel/atom/sst-atom-controls.c 	struct sst_enum *e = (void *)kcontrol->private_value;
e                 188 sound/soc/intel/atom/sst-atom-controls.c 	if (e->w && e->w->power)
e                 190 sound/soc/intel/atom/sst-atom-controls.c 	else if (!e->w)
e                 214 sound/soc/intel/atom/sst-atom-controls.c 	struct sst_enum *e = (void *)kcontrol->private_value;
e                 216 sound/soc/intel/atom/sst-atom-controls.c 	unsigned int ctl_no = e->reg;
e                 217 sound/soc/intel/atom/sst-atom-controls.c 	unsigned int is_tx = e->tx;
e                 226 sound/soc/intel/atom/sst-atom-controls.c 	if (mux > e->max - 1)
e                 231 sound/soc/intel/atom/sst-atom-controls.c 	for (i = 0; i < e->max; i++)
e                 248 sound/soc/intel/atom/sst-atom-controls.c 			e->texts[mux], map[slot_channel_no]);
e                1442 sound/soc/intel/atom/sst-atom-controls.c 			struct sst_enum *e = (void *)kctl->private_value;
e                1444 sound/soc/intel/atom/sst-atom-controls.c 			e->w = w;
e                1447 sound/soc/intel/atom/sst-atom-controls.c 			struct sst_enum *e = (void *)kctl->private_value;
e                1449 sound/soc/intel/atom/sst-atom-controls.c 			e->w = w;
e                1098 sound/soc/mediatek/common/mtk-btcvsd.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                1100 sound/soc/mediatek/common/mtk-btcvsd.c 	if (ucontrol->value.enumerated.item[0] >= e->items)
e                 215 sound/soc/mediatek/mt8183/mt8183-dai-adda.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 217 sound/soc/mediatek/mt8183/mt8183-dai-adda.c 	if (ucontrol->value.enumerated.item[0] >= e->items)
e                 142 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 145 sound/soc/mediatek/mt8183/mt8183-dai-i2s.c 	if (ucontrol->value.enumerated.item[0] >= e->items)
e                 111 sound/soc/meson/g12a-tohdmitx.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 128 sound/soc/meson/g12a-tohdmitx.c 	snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
e                 164 sound/soc/meson/g12a-tohdmitx.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 179 sound/soc/meson/g12a-tohdmitx.c 	snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
e                 355 sound/soc/soc-dapm.c 	struct soc_enum *e;
e                 412 sound/soc/soc-dapm.c 		e = (struct soc_enum *)kcontrol->private_value;
e                 414 sound/soc/soc-dapm.c 		if (e->autodisable) {
e                 425 sound/soc/soc-dapm.c 			template.reg = e->reg;
e                 426 sound/soc/soc-dapm.c 			template.mask = e->mask;
e                 427 sound/soc/soc-dapm.c 			template.shift = e->shift_l;
e                 428 sound/soc/soc-dapm.c 			template.off_val = snd_soc_enum_item_to_val(e, 0);
e                 753 sound/soc/soc-dapm.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 757 sound/soc/soc-dapm.c 	if (e->reg != SND_SOC_NOPM) {
e                 758 sound/soc/soc-dapm.c 		soc_dapm_read(dapm, e->reg, &val);
e                 759 sound/soc/soc-dapm.c 		val = (val >> e->shift_l) & e->mask;
e                 760 sound/soc/soc-dapm.c 		item = snd_soc_enum_val_to_item(e, val);
e                 771 sound/soc/soc-dapm.c 	i = match_string(e->texts, e->items, control_name);
e                 775 sound/soc/soc-dapm.c 	path->name = e->texts[i];
e                2270 sound/soc/soc-dapm.c 				 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
e                2282 sound/soc/soc-dapm.c 		if (e && !(strcmp(path->name, e->texts[mux])))
e                2297 sound/soc/soc-dapm.c 	struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
e                2305 sound/soc/soc-dapm.c 	ret = soc_dapm_mux_update_power(card, kcontrol, mux, e);
e                3437 sound/soc/soc-dapm.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                3441 sound/soc/soc-dapm.c 	if (e->reg != SND_SOC_NOPM && dapm_kcontrol_is_powered(kcontrol)) {
e                3442 sound/soc/soc-dapm.c 		int ret = soc_dapm_read(dapm, e->reg, &reg_val);
e                3452 sound/soc/soc-dapm.c 	val = (reg_val >> e->shift_l) & e->mask;
e                3453 sound/soc/soc-dapm.c 	ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
e                3454 sound/soc/soc-dapm.c 	if (e->shift_l != e->shift_r) {
e                3455 sound/soc/soc-dapm.c 		val = (reg_val >> e->shift_r) & e->mask;
e                3456 sound/soc/soc-dapm.c 		val = snd_soc_enum_val_to_item(e, val);
e                3478 sound/soc/soc-dapm.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                3485 sound/soc/soc-dapm.c 	if (item[0] >= e->items)
e                3488 sound/soc/soc-dapm.c 	val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
e                3489 sound/soc/soc-dapm.c 	mask = e->mask << e->shift_l;
e                3490 sound/soc/soc-dapm.c 	if (e->shift_l != e->shift_r) {
e                3491 sound/soc/soc-dapm.c 		if (item[1] > e->items)
e                3493 sound/soc/soc-dapm.c 		val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
e                3494 sound/soc/soc-dapm.c 		mask |= e->mask << e->shift_r;
e                3501 sound/soc/soc-dapm.c 	if (e->reg != SND_SOC_NOPM)
e                3502 sound/soc/soc-dapm.c 		reg_change = soc_dapm_test_bits(dapm, e->reg, mask, val);
e                3507 sound/soc/soc-dapm.c 			update.reg = e->reg;
e                3514 sound/soc/soc-dapm.c 		ret = soc_dapm_mux_update_power(card, kcontrol, item[0], e);
e                  43 sound/soc/soc-ops.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                  45 sound/soc/soc-ops.c 	return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2,
e                  46 sound/soc/soc-ops.c 				 e->items, e->texts);
e                  63 sound/soc/soc-ops.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                  68 sound/soc/soc-ops.c 	ret = snd_soc_component_read(component, e->reg, &reg_val);
e                  71 sound/soc/soc-ops.c 	val = (reg_val >> e->shift_l) & e->mask;
e                  72 sound/soc/soc-ops.c 	item = snd_soc_enum_val_to_item(e, val);
e                  74 sound/soc/soc-ops.c 	if (e->shift_l != e->shift_r) {
e                  75 sound/soc/soc-ops.c 		val = (reg_val >> e->shift_r) & e->mask;
e                  76 sound/soc/soc-ops.c 		item = snd_soc_enum_val_to_item(e, val);
e                  97 sound/soc/soc-ops.c 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
e                 102 sound/soc/soc-ops.c 	if (item[0] >= e->items)
e                 104 sound/soc/soc-ops.c 	val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
e                 105 sound/soc/soc-ops.c 	mask = e->mask << e->shift_l;
e                 106 sound/soc/soc-ops.c 	if (e->shift_l != e->shift_r) {
e                 107 sound/soc/soc-ops.c 		if (item[1] >= e->items)
e                 109 sound/soc/soc-ops.c 		val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
e                 110 sound/soc/soc-ops.c 		mask |= e->mask << e->shift_r;
e                 113 sound/soc/soc-ops.c 	return snd_soc_component_update_bits(component, e->reg, mask, val);
e                 274 tools/arch/powerpc/include/uapi/asm/kvm.h 		} e;
e                 259 tools/bpf/bpftool/cfg.c 	struct edge_node *e;
e                 261 tools/bpf/bpftool/cfg.c 	e = calloc(1, sizeof(*e));
e                 262 tools/bpf/bpftool/cfg.c 	if (!e) {
e                 268 tools/bpf/bpftool/cfg.c 		e->src = src;
e                 270 tools/bpf/bpftool/cfg.c 		e->dst = dst;
e                 272 tools/bpf/bpftool/cfg.c 	e->flags |= flags;
e                 274 tools/bpf/bpftool/cfg.c 	return e;
e                 280 tools/bpf/bpftool/cfg.c 	struct edge_node *e;
e                 284 tools/bpf/bpftool/cfg.c 	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
e                 285 tools/bpf/bpftool/cfg.c 	if (!e)
e                 287 tools/bpf/bpftool/cfg.c 	list_add_tail(&e->l, &bb->e_succs);
e                 290 tools/bpf/bpftool/cfg.c 	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
e                 291 tools/bpf/bpftool/cfg.c 	if (!e)
e                 293 tools/bpf/bpftool/cfg.c 	list_add_tail(&e->l, &bb->e_prevs);
e                 298 tools/bpf/bpftool/cfg.c 		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
e                 299 tools/bpf/bpftool/cfg.c 		if (!e)
e                 301 tools/bpf/bpftool/cfg.c 		e->src = bb;
e                 306 tools/bpf/bpftool/cfg.c 			e->dst = bb_next(bb);
e                 307 tools/bpf/bpftool/cfg.c 			e->flags |= EDGE_FLAG_FALLTHROUGH;
e                 308 tools/bpf/bpftool/cfg.c 			list_add_tail(&e->l, &bb->e_succs);
e                 311 tools/bpf/bpftool/cfg.c 			e->dst = func_search_bb_with_head(func,
e                 313 tools/bpf/bpftool/cfg.c 			e->flags |= EDGE_FLAG_JUMP;
e                 314 tools/bpf/bpftool/cfg.c 			list_add_tail(&e->l, &bb->e_succs);
e                 318 tools/bpf/bpftool/cfg.c 		e->dst = bb_next(bb);
e                 319 tools/bpf/bpftool/cfg.c 		e->flags |= EDGE_FLAG_FALLTHROUGH;
e                 320 tools/bpf/bpftool/cfg.c 		list_add_tail(&e->l, &bb->e_succs);
e                 322 tools/bpf/bpftool/cfg.c 		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
e                 323 tools/bpf/bpftool/cfg.c 		if (!e)
e                 325 tools/bpf/bpftool/cfg.c 		e->src = bb;
e                 326 tools/bpf/bpftool/cfg.c 		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
e                 327 tools/bpf/bpftool/cfg.c 		list_add_tail(&e->l, &bb->e_succs);
e                 362 tools/bpf/bpftool/cfg.c 			struct edge_node *e, *e2;
e                 364 tools/bpf/bpftool/cfg.c 			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
e                 365 tools/bpf/bpftool/cfg.c 				list_del(&e->l);
e                 366 tools/bpf/bpftool/cfg.c 				free(e);
e                 369 tools/bpf/bpftool/cfg.c 			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
e                 370 tools/bpf/bpftool/cfg.c 				list_del(&e->l);
e                 371 tools/bpf/bpftool/cfg.c 				free(e);
e                 419 tools/bpf/bpftool/cfg.c 	struct edge_node *e;
e                 425 tools/bpf/bpftool/cfg.c 	list_for_each_entry(e, &bb->e_succs, l) {
e                 427 tools/bpf/bpftool/cfg.c 		       func_idx, e->src->idx, func_idx, e->dst->idx,
e                  68 tools/bpf/bpftool/map_perf_ring.c 	struct perf_event_sample *e = container_of(event,
e                  80 tools/bpf/bpftool/map_perf_ring.c 		jsonw_uint(json_wtr, e->header.type);
e                  85 tools/bpf/bpftool/map_perf_ring.c 		if (e->header.type == PERF_RECORD_SAMPLE) {
e                  87 tools/bpf/bpftool/map_perf_ring.c 			jsonw_uint(json_wtr, e->time);
e                  89 tools/bpf/bpftool/map_perf_ring.c 			print_data_json(e->data, e->size);
e                  90 tools/bpf/bpftool/map_perf_ring.c 		} else if (e->header.type == PERF_RECORD_LOST) {
e                 101 tools/bpf/bpftool/map_perf_ring.c 		if (e->header.type == PERF_RECORD_SAMPLE) {
e                 103 tools/bpf/bpftool/map_perf_ring.c 			       e->time / 1000000000ULL, e->time % 1000000000ULL,
e                 105 tools/bpf/bpftool/map_perf_ring.c 			fprint_hex(stdout, e->data, e->size, " ");
e                 107 tools/bpf/bpftool/map_perf_ring.c 		} else if (e->header.type == PERF_RECORD_LOST) {
e                 111 tools/bpf/bpftool/map_perf_ring.c 			       e->header.type, e->header.size);
e                   7 tools/build/tests/ex/ex.c int e(void);
e                  17 tools/build/tests/ex/ex.c 	e();
e                   9 tools/include/linux/bug.h #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
e                  37 tools/include/linux/irqflags.h #define trace_lock_acquire(a, b, c, d, e, f, g)
e                  39 tools/include/linux/kernel.h #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
e                1654 tools/include/nolibc/nolibc.h 		fd_set *r, *w, *e;
e                1656 tools/include/nolibc/nolibc.h 	} arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
e                4729 tools/lib/bpf/libbpf.c 	struct bpf_map *s, *e;
e                4735 tools/lib/bpf/libbpf.c 	e = obj->maps + obj->nr_maps;
e                4737 tools/lib/bpf/libbpf.c 	if ((m < s) || (m >= e)) {
e                5599 tools/lib/bpf/libbpf.c perf_buffer__process_record(struct perf_event_header *e, void *ctx)
e                5603 tools/lib/bpf/libbpf.c 	void *data = e;
e                5607 tools/lib/bpf/libbpf.c 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
e                5609 tools/lib/bpf/libbpf.c 	switch (e->type) {
e                5625 tools/lib/bpf/libbpf.c 		pr_warning("unknown perf sample type %d\n", e->type);
e                  16 tools/lib/bpf/libbpf_errno.c #define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
e                 459 tools/perf/builtin-diff.c 	struct evsel *e;
e                 461 tools/perf/builtin-diff.c 	evlist__for_each_entry(evlist, e) {
e                 462 tools/perf/builtin-diff.c 		if (perf_evsel__match2(evsel, e))
e                 463 tools/perf/builtin-diff.c 			return e;
e                  52 tools/perf/builtin-mem.c 		struct perf_mem_event *e = &perf_mem_events[j];
e                  55 tools/perf/builtin-mem.c 			e->tag,
e                  58 tools/perf/builtin-mem.c 			e->supported ? ": available" : "");
e                 238 tools/perf/builtin-stat.c #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
e                2175 tools/perf/builtin-trace.c 			   *e = errno_to_name(evsel, -ret);
e                2177 tools/perf/builtin-trace.c 		fprintf(trace->output, "-1 %s (%s)", e, emsg);
e                 182 tools/perf/lib/evlist.c #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
e                  40 tools/perf/lib/evsel.c #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
e                 111 tools/perf/pmu-events/jevents.c 	char *e = s + strlen(s);
e                 114 tools/perf/pmu-events/jevents.c 	--e;
e                 115 tools/perf/pmu-events/jevents.c 	while (e >= s && isspace(*e))
e                 116 tools/perf/pmu-events/jevents.c 		--e;
e                 117 tools/perf/pmu-events/jevents.c 	if (*e == '.')
e                 118 tools/perf/pmu-events/jevents.c 		*e = 0;
e                 257 tools/perf/pmu-events/jevents.c #define EXPECT(e, t, m) do { if (!(e)) {			\
e                   9 tools/perf/tests/expr.c static int test(struct parse_ctx *ctx, const char *e, double val2)
e                  13 tools/perf/tests/expr.c 	if (expr__parse(&val, ctx, &e))
e                  47 tools/perf/tests/kmod-path.c #define M(path, c, e) \
e                  48 tools/perf/tests/kmod-path.c 	TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
e                1769 tools/perf/tests/parse-events.c static int test_event(struct evlist_test *e)
e                1775 tools/perf/tests/parse-events.c 	if (e->valid && !e->valid()) {
e                1784 tools/perf/tests/parse-events.c 	ret = parse_events(evlist, e->name, &err);
e                1787 tools/perf/tests/parse-events.c 			 e->name, ret, err.str);
e                1788 tools/perf/tests/parse-events.c 		parse_events_print_error(&err, e->name);
e                1790 tools/perf/tests/parse-events.c 		ret = e->check(evlist);
e                1804 tools/perf/tests/parse-events.c 		struct evlist_test *e = &events[i];
e                1806 tools/perf/tests/parse-events.c 		pr_debug("running test %d '%s'", e->id, e->name);
e                1807 tools/perf/tests/parse-events.c 		ret1 = test_event(e);
e                1892 tools/perf/tests/parse-events.c 		struct evlist_test e = { .id = 0, };
e                1901 tools/perf/tests/parse-events.c 		e.name  = name;
e                1902 tools/perf/tests/parse-events.c 		e.check = test__checkevent_pmu_events;
e                1904 tools/perf/tests/parse-events.c 		ret = test_event(&e);
e                1908 tools/perf/tests/parse-events.c 		e.name  = name;
e                1909 tools/perf/tests/parse-events.c 		e.check = test__checkevent_pmu_events_mix;
e                1910 tools/perf/tests/parse-events.c 		ret = test_event(&e);
e                 174 tools/perf/tests/time-utils-test.c 		u64 e = 8000000000000000ULL;
e                 179 tools/perf/tests/time-utils-test.c 			.ptime = { {b, b + 1}, {c, c + 123}, {e, e + 5}, },
e                 181 tools/perf/tests/time-utils-test.c 			.skip = { b - 1, b + 2, c - 1, c + 124, e - 1, e + 6 },
e                 182 tools/perf/tests/time-utils-test.c 			.noskip = { b, b + 1, c, c + 123, e, e + 5 },
e                1174 tools/perf/util/auxtrace.c 	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
e                1175 tools/perf/util/auxtrace.c 	unsigned long long nsecs = e->time;
e                1176 tools/perf/util/auxtrace.c 	const char *msg = e->msg;
e                1180 tools/perf/util/auxtrace.c 		      auxtrace_error_name(e->type), e->type);
e                1182 tools/perf/util/auxtrace.c 	if (e->fmt && nsecs) {
e                1191 tools/perf/util/auxtrace.c 	if (!e->fmt)
e                1192 tools/perf/util/auxtrace.c 		msg = (const char *)&e->time;
e                1195 tools/perf/util/auxtrace.c 		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
e                1202 tools/perf/util/auxtrace.c 	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
e                1204 tools/perf/util/auxtrace.c 	if (e->type < PERF_AUXTRACE_ERROR_MAX)
e                1205 tools/perf/util/auxtrace.c 		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
e                1612 tools/perf/util/bpf-loader.c #define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
e                 208 tools/perf/util/cgroup.c 	const char *p, *e, *eos = str + strlen(str);
e                 219 tools/perf/util/cgroup.c 		e = p ? p : eos;
e                 222 tools/perf/util/cgroup.c 		if (e - str) {
e                 224 tools/perf/util/cgroup.c 			s = strndup(str, e - str);
e                  52 tools/perf/util/evlist.c #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
e                  53 tools/perf/util/evlist.c #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
e                 103 tools/perf/util/evsel.c #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
e                 159 tools/perf/util/genelf.c jit_add_eh_frame_info(Elf *e, void* unwinding, uint64_t unwinding_header_size,
e                 170 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 204 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 251 tools/perf/util/genelf.c 	Elf *e;
e                 266 tools/perf/util/genelf.c 	e = elf_begin(fd, ELF_C_WRITE, NULL);
e                 267 tools/perf/util/genelf.c 	if (!e) {
e                 275 tools/perf/util/genelf.c 	ehdr = elf_newehdr(e);
e                 292 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 328 tools/perf/util/genelf.c 		retval = jit_add_eh_frame_info(e, unwinding,
e                 338 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 374 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 417 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 450 tools/perf/util/genelf.c 	scn = elf_newscn(e);
e                 493 tools/perf/util/genelf.c 		retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
e                 499 tools/perf/util/genelf.c 		if (elf_update(e, ELF_C_WRITE) < 0) {
e                 507 tools/perf/util/genelf.c 	(void)elf_end(e);
e                  11 tools/perf/util/genelf.h int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries);
e                 484 tools/perf/util/genelf_debug.c jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries)
e                 502 tools/perf/util/genelf_debug.c 	scn = elf_newscn(e);
e                 536 tools/perf/util/genelf_debug.c 	scn = elf_newscn(e);
e                 570 tools/perf/util/genelf_debug.c 	scn = elf_newscn(e);
e                 604 tools/perf/util/genelf_debug.c 	if (elf_update(e, ELF_C_WRITE) < 0) {
e                 471 tools/perf/util/intel-pt.c 	struct intel_pt_cache_entry *e;
e                 477 tools/perf/util/intel-pt.c 	e = auxtrace_cache__alloc_entry(c);
e                 478 tools/perf/util/intel-pt.c 	if (!e)
e                 481 tools/perf/util/intel-pt.c 	e->insn_cnt = insn_cnt;
e                 482 tools/perf/util/intel-pt.c 	e->byte_cnt = byte_cnt;
e                 483 tools/perf/util/intel-pt.c 	e->op = intel_pt_insn->op;
e                 484 tools/perf/util/intel-pt.c 	e->branch = intel_pt_insn->branch;
e                 485 tools/perf/util/intel-pt.c 	e->length = intel_pt_insn->length;
e                 486 tools/perf/util/intel-pt.c 	e->rel = intel_pt_insn->rel;
e                 487 tools/perf/util/intel-pt.c 	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
e                 489 tools/perf/util/intel-pt.c 	err = auxtrace_cache__add(c, offset, &e->entry);
e                 491 tools/perf/util/intel-pt.c 		auxtrace_cache__free_entry(c, e);
e                 557 tools/perf/util/intel-pt.c 			struct intel_pt_cache_entry *e;
e                 559 tools/perf/util/intel-pt.c 			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
e                 560 tools/perf/util/intel-pt.c 			if (e &&
e                 561 tools/perf/util/intel-pt.c 			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
e                 562 tools/perf/util/intel-pt.c 				*insn_cnt_ptr = e->insn_cnt;
e                 563 tools/perf/util/intel-pt.c 				*ip += e->byte_cnt;
e                 564 tools/perf/util/intel-pt.c 				intel_pt_insn->op = e->op;
e                 565 tools/perf/util/intel-pt.c 				intel_pt_insn->branch = e->branch;
e                 566 tools/perf/util/intel-pt.c 				intel_pt_insn->length = e->length;
e                 567 tools/perf/util/intel-pt.c 				intel_pt_insn->rel = e->rel;
e                 568 tools/perf/util/intel-pt.c 				memcpy(intel_pt_insn->buf, e->insn,
e                 626 tools/perf/util/intel-pt.c 		struct intel_pt_cache_entry *e;
e                 628 tools/perf/util/intel-pt.c 		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
e                 629 tools/perf/util/intel-pt.c 		if (e)
e                  64 tools/perf/util/mem-events.c 			struct perf_mem_event *e = &perf_mem_events[j];
e                  66 tools/perf/util/mem-events.c 			if (strstr(e->tag, tok))
e                  67 tools/perf/util/mem-events.c 				e->record = found = true;
e                  93 tools/perf/util/mem-events.c 		struct perf_mem_event *e = &perf_mem_events[j];
e                  97 tools/perf/util/mem-events.c 			  mnt, e->sysfs_name);
e                 100 tools/perf/util/mem-events.c 			e->supported = found = true;
e                  21 tools/perf/util/mem2node.c 	struct phys_entry *e;
e                  25 tools/perf/util/mem2node.c 		e = rb_entry(parent, struct phys_entry, rb_node);
e                  27 tools/perf/util/mem2node.c 		if (entry->start < e->start)
e                 480 tools/perf/util/parse-events.c static void tracepoint_error(struct parse_events_error *e, int err,
e                 485 tools/perf/util/parse-events.c 	if (!e)
e                 496 tools/perf/util/parse-events.c 		e->str = strdup("can't access trace events");
e                 499 tools/perf/util/parse-events.c 		e->str = strdup("unknown tracepoint");
e                 502 tools/perf/util/parse-events.c 		e->str = strdup("failed to add tracepoint");
e                 507 tools/perf/util/parse-events.c 	e->help = strdup(help);
e                1073 tools/perf/util/session.c 		struct branch_entry *e = &sample->branch_stack->entries[i];
e                1077 tools/perf/util/session.c 				i, e->from, e->to,
e                1078 tools/perf/util/session.c 				(unsigned short)e->flags.cycles,
e                1079 tools/perf/util/session.c 				e->flags.mispred ? "M" : " ",
e                1080 tools/perf/util/session.c 				e->flags.predicted ? "P" : " ",
e                1081 tools/perf/util/session.c 				e->flags.abort ? "A" : " ",
e                1082 tools/perf/util/session.c 				e->flags.in_tx ? "T" : " ",
e                1083 tools/perf/util/session.c 				(unsigned)e->flags.reserved);
e                1086 tools/perf/util/session.c 				i, i > 0 ? e->from : e->to);
e                2414 tools/perf/util/session.c 		struct id_index_entry *e = &ie->entries[i];
e                2418 tools/perf/util/session.c 			fprintf(stdout,	" ... id: %"PRI_lu64, e->id);
e                2419 tools/perf/util/session.c 			fprintf(stdout,	"  idx: %"PRI_lu64, e->idx);
e                2420 tools/perf/util/session.c 			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
e                2421 tools/perf/util/session.c 			fprintf(stdout,	"  tid: %"PRI_ld64"\n", e->tid);
e                2424 tools/perf/util/session.c 		sid = perf_evlist__id2sid(evlist, e->id);
e                2427 tools/perf/util/session.c 		sid->idx = e->idx;
e                2428 tools/perf/util/session.c 		sid->cpu = e->cpu;
e                2429 tools/perf/util/session.c 		sid->tid = e->tid;
e                 499 tools/perf/util/sort.c static char *hist_entry__get_srcfile(struct hist_entry *e)
e                 502 tools/perf/util/sort.c 	struct map *map = e->ms.map;
e                 507 tools/perf/util/sort.c 	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
e                 508 tools/perf/util/sort.c 			 e->ms.sym, false, true, true, e->ip);
e                  38 tools/perf/util/strfilter.c static const char *get_token(const char *s, const char **e)
e                  62 tools/perf/util/strfilter.c 	*e = p;
e                  85 tools/perf/util/strfilter.c 	const char *e;
e                  93 tools/perf/util/strfilter.c 	s = get_token(s, &e);
e                 130 tools/perf/util/strfilter.c 			e = s + 1;
e                 138 tools/perf/util/strfilter.c 			cur->r->p = strndup(s, e - s);
e                 142 tools/perf/util/strfilter.c 		s = get_token(e, &e);
e                 226 tools/perf/util/string.c 		char *e = expr;
e                 238 tools/perf/util/string.c 				printed += scnprintf(e + printed, size - printed, " %s ", or_and);
e                 239 tools/perf/util/string.c 			printed += scnprintf(e + printed, size - printed,
e                1602 tools/perf/util/symbol-elf.c 			   u64 pgoff, u64 s, u64 e)
e                1610 tools/perf/util/symbol-elf.c 	len = e < end ? e - s : end - s;
e                1432 tools/perf/util/synthetic-events.c 			struct id_index_entry *e;
e                1443 tools/perf/util/synthetic-events.c 			e = &ev->id_index.entries[i++];
e                1445 tools/perf/util/synthetic-events.c 			e->id = evsel->core.id[j];
e                1447 tools/perf/util/synthetic-events.c 			sid = perf_evlist__id2sid(evlist, e->id);
e                1453 tools/perf/util/synthetic-events.c 			e->idx = sid->idx;
e                1454 tools/perf/util/synthetic-events.c 			e->cpu = sid->cpu;
e                1455 tools/perf/util/synthetic-events.c 			e->tid = sid->tid;
e                  77 tools/perf/util/unwind-libdw.c 	struct unwind_entry *e = &ui->entries[ui->idx++];
e                  83 tools/perf/util/unwind-libdw.c 	e->ip  = ip;
e                  84 tools/perf/util/unwind-libdw.c 	e->map = al.map;
e                  85 tools/perf/util/unwind-libdw.c 	e->sym = al.sym;
e                 575 tools/perf/util/unwind-libunwind-local.c 	struct unwind_entry e;
e                 578 tools/perf/util/unwind-libunwind-local.c 	e.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
e                 579 tools/perf/util/unwind-libunwind-local.c 	e.ip  = ip;
e                 580 tools/perf/util/unwind-libunwind-local.c 	e.map = al.map;
e                 587 tools/perf/util/unwind-libunwind-local.c 	return cb(&e, arg);
e                 119 tools/testing/selftests/bpf/prog_tests/core_reloc.c 		.e = { [2] = 5 },					\
e                 127 tools/testing/selftests/bpf/prog_tests/core_reloc.c 		.e = 5, .f = 6, .g = 7, .h = 8,				\
e                  27 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c 	struct get_stack_trace_t *e = data;
e                  60 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c 		num_stack = e->kern_stack_size / sizeof(__u64);
e                  65 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c 				ks = ksym_search(e->kern_stack[i]);
e                  72 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c 		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
e                  36 tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c 	short e: 5;
e                  64 tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c 	int e; /* combined with previous bitfield */
e                  36 tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c 	} __attribute__((packed)) e;
e                  84 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 		int e[5];
e                 104 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 	ptr_arr_t e;
e                 139 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 	enum e2 e;
e                 169 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 		int e;
e                 184 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c 		} e;
e                 465 tools/testing/selftests/bpf/progs/core_reloc_types.h 	int a, b, c, d, e, f, g, h;
e                 487 tools/testing/selftests/bpf/progs/core_reloc_types.h 	int e[3];
e                 500 tools/testing/selftests/bpf/progs/core_reloc_types.h 	arr_t e;
e                 526 tools/testing/selftests/bpf/progs/core_reloc_types.h 	arr4_t e;
e                  16 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c 	int a, b, c, d, e, f, g, h;
e                  38 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c 	int e[3];
e                  54 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c 	    BPF_CORE_READ(&out->e, &in->e[2]) ||
e                  29 tools/testing/selftests/kvm/include/test_util.h #define TEST_ASSERT(e, fmt, ...) \
e                  30 tools/testing/selftests/kvm/include/test_util.h 	test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
e                  19 tools/testing/selftests/powerpc/pmu/count_instructions.c static void setup_event(struct event *e, u64 config, char *name)
e                  21 tools/testing/selftests/powerpc/pmu/count_instructions.c 	event_init_opts(e, config, PERF_TYPE_HARDWARE, name);
e                  23 tools/testing/selftests/powerpc/pmu/count_instructions.c 	e->attr.disabled = 1;
e                  24 tools/testing/selftests/powerpc/pmu/count_instructions.c 	e->attr.exclude_kernel = 1;
e                  25 tools/testing/selftests/powerpc/pmu/count_instructions.c 	e->attr.exclude_hv = 1;
e                  26 tools/testing/selftests/powerpc/pmu/count_instructions.c 	e->attr.exclude_idle = 1;
e                 275 tools/testing/selftests/powerpc/pmu/ebb/ebb.c int ebb_event_enable(struct event *e)
e                 282 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	rc = ioctl(e->fd, PERF_EVENT_IOC_ENABLE);
e                 286 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	rc = event_read(e);
e                 331 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_ebb_init(struct event *e)
e                 333 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	e->attr.config |= (1ull << 63);
e                 336 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_bhrb_init(struct event *e, unsigned ifm)
e                 338 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	e->attr.config |= (1ull << 62) | ((u64)ifm << 60);
e                 341 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void event_leader_ebb_init(struct event *e)
e                 343 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	event_ebb_init(e);
e                 345 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	e->attr.exclusive = 1;
e                 346 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	e->attr.pinned = 1;
e                  47 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_leader_ebb_init(struct event *e);
e                  48 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_ebb_init(struct event *e);
e                  49 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_bhrb_init(struct event *e, unsigned ifm);
e                  52 tools/testing/selftests/powerpc/pmu/ebb/ebb.h int ebb_event_enable(struct event *e);
e                  58 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_ebb_init(struct event *e);
e                  59 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void event_leader_ebb_init(struct event *e);
e                  78 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                  80 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc(tb, sizeof(*e) + payload_size);
e                  81 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (e)
e                  82 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		e->length = payload_size;
e                  84 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	return e;
e                  89 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                  92 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value));
e                  93 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!e)
e                  96 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e->type = TRACE_TYPE_REG;
e                  97 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
e                 106 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                 109 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, sizeof(value));
e                 110 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!e)
e                 113 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e->type = TRACE_TYPE_COUNTER;
e                 114 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
e                 122 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                 129 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, len + 1);
e                 130 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!e)
e                 133 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e->type = TRACE_TYPE_STRING;
e                 134 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (char *)e->data;
e                 144 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                 146 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, 0);
e                 147 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!e)
e                 150 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e->type = TRACE_TYPE_INDENT;
e                 157 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                 159 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e = trace_alloc_entry(tb, 0);
e                 160 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	if (!e)
e                 163 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	e->type = TRACE_TYPE_OUTDENT;
e                 200 tools/testing/selftests/powerpc/pmu/ebb/trace.c static void trace_print_reg(struct trace_entry *e)
e                 205 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	p = (u64 *)e->data;
e                 216 tools/testing/selftests/powerpc/pmu/ebb/trace.c static void trace_print_counter(struct trace_entry *e)
e                 220 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	value = (u64 *)e->data;
e                 224 tools/testing/selftests/powerpc/pmu/ebb/trace.c static void trace_print_string(struct trace_entry *e)
e                 228 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	str = (char *)e->data;
e                 235 tools/testing/selftests/powerpc/pmu/ebb/trace.c static void trace_print_entry(struct trace_entry *e, int seq, int *prefix)
e                 237 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	switch (e->type) {
e                 240 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		trace_print_reg(e);
e                 244 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		trace_print_counter(e);
e                 248 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		trace_print_string(e);
e                 264 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		printf("entry @ %p type %d\n", e, e->type);
e                 271 tools/testing/selftests/powerpc/pmu/ebb/trace.c 	struct trace_entry *e;
e                 288 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		e = p;
e                 290 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		trace_print_entry(e, i, &prefix);
e                 293 tools/testing/selftests/powerpc/pmu/ebb/trace.c 		p = (void *)e + sizeof(*e) + e->length;
e                  23 tools/testing/selftests/powerpc/pmu/event.c void event_init_opts(struct event *e, u64 config, int type, char *name)
e                  25 tools/testing/selftests/powerpc/pmu/event.c 	memset(e, 0, sizeof(*e));
e                  27 tools/testing/selftests/powerpc/pmu/event.c 	e->name = name;
e                  29 tools/testing/selftests/powerpc/pmu/event.c 	e->attr.type = type;
e                  30 tools/testing/selftests/powerpc/pmu/event.c 	e->attr.config = config;
e                  31 tools/testing/selftests/powerpc/pmu/event.c 	e->attr.size = sizeof(e->attr);
e                  33 tools/testing/selftests/powerpc/pmu/event.c 	e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | \
e                  37 tools/testing/selftests/powerpc/pmu/event.c void event_init_named(struct event *e, u64 config, char *name)
e                  39 tools/testing/selftests/powerpc/pmu/event.c 	event_init_opts(e, config, PERF_TYPE_RAW, name);
e                  42 tools/testing/selftests/powerpc/pmu/event.c void event_init(struct event *e, u64 config)
e                  44 tools/testing/selftests/powerpc/pmu/event.c 	event_init_opts(e, config, PERF_TYPE_RAW, "event");
e                  52 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
e                  54 tools/testing/selftests/powerpc/pmu/event.c 	e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0);
e                  55 tools/testing/selftests/powerpc/pmu/event.c 	if (e->fd == -1) {
e                  63 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_group(struct event *e, int group_fd)
e                  65 tools/testing/selftests/powerpc/pmu/event.c 	return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd);
e                  68 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_pid(struct event *e, pid_t pid)
e                  70 tools/testing/selftests/powerpc/pmu/event.c 	return event_open_with_options(e, pid, PERF_NO_CPU, PERF_NO_GROUP);
e                  73 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_cpu(struct event *e, int cpu)
e                  75 tools/testing/selftests/powerpc/pmu/event.c 	return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP);
e                  78 tools/testing/selftests/powerpc/pmu/event.c int event_open(struct event *e)
e                  80 tools/testing/selftests/powerpc/pmu/event.c 	return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP);
e                  83 tools/testing/selftests/powerpc/pmu/event.c void event_close(struct event *e)
e                  85 tools/testing/selftests/powerpc/pmu/event.c 	close(e->fd);
e                  88 tools/testing/selftests/powerpc/pmu/event.c int event_enable(struct event *e)
e                  90 tools/testing/selftests/powerpc/pmu/event.c 	return ioctl(e->fd, PERF_EVENT_IOC_ENABLE);
e                  93 tools/testing/selftests/powerpc/pmu/event.c int event_disable(struct event *e)
e                  95 tools/testing/selftests/powerpc/pmu/event.c 	return ioctl(e->fd, PERF_EVENT_IOC_DISABLE);
e                  98 tools/testing/selftests/powerpc/pmu/event.c int event_reset(struct event *e)
e                 100 tools/testing/selftests/powerpc/pmu/event.c 	return ioctl(e->fd, PERF_EVENT_IOC_RESET);
e                 103 tools/testing/selftests/powerpc/pmu/event.c int event_read(struct event *e)
e                 107 tools/testing/selftests/powerpc/pmu/event.c 	rc = read(e->fd, &e->result, sizeof(e->result));
e                 108 tools/testing/selftests/powerpc/pmu/event.c 	if (rc != sizeof(e->result)) {
e                 109 tools/testing/selftests/powerpc/pmu/event.c 		fprintf(stderr, "read error on event %p!\n", e);
e                 116 tools/testing/selftests/powerpc/pmu/event.c void event_report_justified(struct event *e, int name_width, int result_width)
e                 118 tools/testing/selftests/powerpc/pmu/event.c 	printf("%*s: result %*llu ", name_width, e->name, result_width,
e                 119 tools/testing/selftests/powerpc/pmu/event.c 	       e->result.value);
e                 121 tools/testing/selftests/powerpc/pmu/event.c 	if (e->result.running == e->result.enabled)
e                 122 tools/testing/selftests/powerpc/pmu/event.c 		printf("running/enabled %llu\n", e->result.running);
e                 124 tools/testing/selftests/powerpc/pmu/event.c 		printf("running %llu enabled %llu\n", e->result.running,
e                 125 tools/testing/selftests/powerpc/pmu/event.c 			e->result.enabled);
e                 128 tools/testing/selftests/powerpc/pmu/event.c void event_report(struct event *e)
e                 130 tools/testing/selftests/powerpc/pmu/event.c 	event_report_justified(e, 0, 0);
e                  27 tools/testing/selftests/powerpc/pmu/event.h void event_init(struct event *e, u64 config);
e                  28 tools/testing/selftests/powerpc/pmu/event.h void event_init_named(struct event *e, u64 config, char *name);
e                  29 tools/testing/selftests/powerpc/pmu/event.h void event_init_opts(struct event *e, u64 config, int type, char *name);
e                  30 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
e                  31 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_group(struct event *e, int group_fd);
e                  32 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_pid(struct event *e, pid_t pid);
e                  33 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_cpu(struct event *e, int cpu);
e                  34 tools/testing/selftests/powerpc/pmu/event.h int event_open(struct event *e);
e                  35 tools/testing/selftests/powerpc/pmu/event.h void event_close(struct event *e);
e                  36 tools/testing/selftests/powerpc/pmu/event.h int event_enable(struct event *e);
e                  37 tools/testing/selftests/powerpc/pmu/event.h int event_disable(struct event *e);
e                  38 tools/testing/selftests/powerpc/pmu/event.h int event_reset(struct event *e);
e                  39 tools/testing/selftests/powerpc/pmu/event.h int event_read(struct event *e);
e                  40 tools/testing/selftests/powerpc/pmu/event.h void event_report_justified(struct event *e, int name_width, int result_width);
e                  41 tools/testing/selftests/powerpc/pmu/event.h void event_report(struct event *e);
e                  25 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	struct event *e, events[4];
e                  37 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e = &events[0];
e                  38 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
e                  40 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.disabled = 1;
e                  42 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e = &events[1];
e                  43 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
e                  45 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.disabled = 1;
e                  46 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_user = 1;
e                  47 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_hv = 1;
e                  49 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e = &events[2];
e                  50 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
e                  52 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.disabled = 1;
e                  53 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_user = 1;
e                  54 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_kernel = 1;
e                  56 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e = &events[3];
e                  57 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	event_init_opts(e, PERF_COUNT_HW_INSTRUCTIONS,
e                  59 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.disabled = 1;
e                  60 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_hv = 1;
e                  61 tools/testing/selftests/powerpc/pmu/per_event_excludes.c 	e->attr.exclude_kernel = 1;
e                 359 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c 		struct io_event e[BUFS_MAX];
e                 361 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c 		ret = io_getevents(ctx, 1, BUFS_MAX, e, NULL);
e                 309 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 			struct io_event e[2];
e                 311 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 			ret = io_getevents(ctx, 1, 2, e, NULL);
e                 314 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 				if (e[i].obj->aio_fildes == ep[0]) {
e                 315 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 					printf("ev=in; ret=%lu\n", e[i].res);
e                 317 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 				} else if (e[i].obj->aio_fildes == ep[1]) {
e                 318 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c 					printf("ev=out; ret=%lu\n", e[i].res);
e                  18 virt/kvm/arm/vgic/vgic-irqfd.c static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
e                  22 virt/kvm/arm/vgic/vgic-irqfd.c 	unsigned int spi_id = e->irqchip.pin + VGIC_NR_PRIVATE_IRQS;
e                  39 virt/kvm/arm/vgic/vgic-irqfd.c 			  struct kvm_kernel_irq_routing_entry *e,
e                  46 virt/kvm/arm/vgic/vgic-irqfd.c 		e->set = vgic_irqfd_set_irq;
e                  47 virt/kvm/arm/vgic/vgic-irqfd.c 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
e                  48 virt/kvm/arm/vgic/vgic-irqfd.c 		e->irqchip.pin = ue->u.irqchip.pin;
e                  49 virt/kvm/arm/vgic/vgic-irqfd.c 		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
e                  50 virt/kvm/arm/vgic/vgic-irqfd.c 		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
e                  54 virt/kvm/arm/vgic/vgic-irqfd.c 		e->set = kvm_set_msi;
e                  55 virt/kvm/arm/vgic/vgic-irqfd.c 		e->msi.address_lo = ue->u.msi.address_lo;
e                  56 virt/kvm/arm/vgic/vgic-irqfd.c 		e->msi.address_hi = ue->u.msi.address_hi;
e                  57 virt/kvm/arm/vgic/vgic-irqfd.c 		e->msi.data = ue->u.msi.data;
e                  58 virt/kvm/arm/vgic/vgic-irqfd.c 		e->msi.flags = ue->flags;
e                  59 virt/kvm/arm/vgic/vgic-irqfd.c 		e->msi.devid = ue->u.msi.devid;
e                  69 virt/kvm/arm/vgic/vgic-irqfd.c static void kvm_populate_msi(struct kvm_kernel_irq_routing_entry *e,
e                  72 virt/kvm/arm/vgic/vgic-irqfd.c 	msi->address_lo = e->msi.address_lo;
e                  73 virt/kvm/arm/vgic/vgic-irqfd.c 	msi->address_hi = e->msi.address_hi;
e                  74 virt/kvm/arm/vgic/vgic-irqfd.c 	msi->data = e->msi.data;
e                  75 virt/kvm/arm/vgic/vgic-irqfd.c 	msi->flags = e->msi.flags;
e                  76 virt/kvm/arm/vgic/vgic-irqfd.c 	msi->devid = e->msi.devid;
e                  85 virt/kvm/arm/vgic/vgic-irqfd.c int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
e                  97 virt/kvm/arm/vgic/vgic-irqfd.c 	kvm_populate_msi(e, &msi);
e                 106 virt/kvm/arm/vgic/vgic-irqfd.c int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
e                 110 virt/kvm/arm/vgic/vgic-irqfd.c 	if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
e                 113 virt/kvm/arm/vgic/vgic-irqfd.c 		kvm_populate_msi(e, &msi);
e                 245 virt/kvm/eventfd.c 	struct kvm_kernel_irq_routing_entry *e;
e                 253 virt/kvm/eventfd.c 	e = entries;
e                 255 virt/kvm/eventfd.c 		irqfd->irq_entry = *e;
e                  26 virt/kvm/irqchip.c 	struct kvm_kernel_irq_routing_entry *e;
e                  32 virt/kvm/irqchip.c 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
e                  33 virt/kvm/irqchip.c 			entries[n] = *e;
e                 108 virt/kvm/irqchip.c 		struct kvm_kernel_irq_routing_entry *e;
e                 111 virt/kvm/irqchip.c 		hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
e                 112 virt/kvm/irqchip.c 			hlist_del(&e->link);
e                 113 virt/kvm/irqchip.c 			kfree(e);
e                 130 virt/kvm/irqchip.c 			       struct kvm_kernel_irq_routing_entry *e,
e                 147 virt/kvm/irqchip.c 	e->gsi = gsi;
e                 148 virt/kvm/irqchip.c 	e->type = ue->type;
e                 149 virt/kvm/irqchip.c 	r = kvm_set_routing_entry(kvm, e, ue);
e                 152 virt/kvm/irqchip.c 	if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
e                 153 virt/kvm/irqchip.c 		rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
e                 155 virt/kvm/irqchip.c 	hlist_add_head(&e->link, &rt->map[e->gsi]);
e                 175 virt/kvm/irqchip.c 	struct kvm_kernel_irq_routing_entry *e;
e                 198 virt/kvm/irqchip.c 		e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
e                 199 virt/kvm/irqchip.c 		if (!e)
e                 213 virt/kvm/irqchip.c 		r = setup_routing_entry(kvm, new, e, ue);
e                 235 virt/kvm/irqchip.c 	kfree(e);