cl                 56 arch/arm64/include/asm/atomic_ll_sc.h #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
cl                 73 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                 78 arch/arm64/include/asm/atomic_ll_sc.h #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
cl                 95 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                155 arch/arm64/include/asm/atomic_ll_sc.h #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
cl                172 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                177 arch/arm64/include/asm/atomic_ll_sc.h #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
cl                194 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                259 arch/arm64/include/asm/atomic_ll_sc.h #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
cl                289 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                318 arch/arm64/include/asm/atomic_ll_sc.h #define __CMPXCHG_DBL(name, mb, rel, cl)				\
cl                342 arch/arm64/include/asm/atomic_ll_sc.h 	: cl);								\
cl                 30 arch/arm64/include/asm/atomic_lse.h #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
cl                 38 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                 57 arch/arm64/include/asm/atomic_lse.h #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
cl                 68 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                 90 arch/arm64/include/asm/atomic_lse.h #define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
cl                 99 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                121 arch/arm64/include/asm/atomic_lse.h #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
cl                133 arch/arm64/include/asm/atomic_lse.h 	: cl);							\
cl                145 arch/arm64/include/asm/atomic_lse.h #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
cl                154 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                183 arch/arm64/include/asm/atomic_lse.h #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
cl                191 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                210 arch/arm64/include/asm/atomic_lse.h #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
cl                221 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                243 arch/arm64/include/asm/atomic_lse.h #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
cl                252 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                274 arch/arm64/include/asm/atomic_lse.h #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
cl                286 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                298 arch/arm64/include/asm/atomic_lse.h #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
cl                307 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                340 arch/arm64/include/asm/atomic_lse.h #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
cl                359 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                383 arch/arm64/include/asm/atomic_lse.h #define __CMPXCHG_DBL(name, mb, cl...)					\
cl                409 arch/arm64/include/asm/atomic_lse.h 	: cl);								\
cl                 21 arch/arm64/include/asm/cmpxchg.h #define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
cl                 40 arch/arm64/include/asm/cmpxchg.h 	: cl);									\
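
In the arm64 atomic and cmpxchg headers above, cl is the macro parameter
that carries the inline-asm clobber list: the fully ordered variants pass
"memory" while the relaxed variants pass nothing at all, so ": cl);"
expands to an empty clobber list. A reduced sketch of that generator
pattern, with hypothetical names (assumes arm64, GNU C, and LSE atomics;
not the kernel's actual macro):

	/* the variadic "cl" argument becomes the asm clobber list */
	#define MY_FETCH_ADD(name, mb, cl...)				\
	static inline int my_fetch_add##name(int i, int *v)		\
	{								\
		asm volatile(						\
		"	ldadd" #mb "	%w[i], %w[i], %[v]"		\
		: [i] "+r" (i), [v] "+Q" (*v)				\
		: "r" (v)						\
		: cl);							\
		return i;						\
	}

	MY_FETCH_ADD(_relaxed,   )		/* no clobbers */
	MY_FETCH_ADD(        , al, "memory")	/* ordered, clobbers memory */
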
cl               1883 arch/mips/include/asm/octeon/cvmx-lmcx-defs.h 		uint64_t cl:4;
cl               1903 arch/mips/include/asm/octeon/cvmx-lmcx-defs.h 		uint64_t cl:4;
cl                 67 arch/mips/include/asm/octeon/cvmx-pciercx-defs.h 		__BITFIELD_FIELD(uint32_t cl:1,
cl                 50 arch/mips/kernel/smp-cps.c 	int cl, c, v;
cl                 56 arch/mips/kernel/smp-cps.c 	for (cl = 0; cl < nclusters; cl++) {
cl                 57 arch/mips/kernel/smp-cps.c 		if (cl > 0)
cl                 61 arch/mips/kernel/smp-cps.c 		ncores = mips_cps_numcores(cl);
cl                 63 arch/mips/kernel/smp-cps.c 			core_vpes = core_vpe_count(cl, c);
cl                 70 arch/mips/kernel/smp-cps.c 			if (!cl && !c)
cl                 74 arch/mips/kernel/smp-cps.c 				cpu_set_cluster(&cpu_data[nvpes + v], cl);
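
In arch/mips/kernel/smp-cps.c, cl is a cluster index. The excerpted lines
come from the possible-CPU enumeration, which walks clusters, then cores,
then VPEs; loosely reconstructed from the hits above (bodies elided,
shape only):

	for (cl = 0; cl < nclusters; cl++) {
		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);
			/* cl == 0, c == 0 is the boot core */
			for (v = 0; v < core_vpes; v++)
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
			nvpes += core_vpes;
		}
	}
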
cl                 91 arch/mips/lantiq/clk.c 				clk->cl.dev_id, clk->cl.con_id, rate);
cl                 59 arch/mips/lantiq/clk.h 	struct clk_lookup cl;
cl                170 arch/mips/lantiq/falcon/sysctrl.c 	clk->cl.dev_id = dev;
cl                171 arch/mips/lantiq/falcon/sysctrl.c 	clk->cl.con_id = NULL;
cl                172 arch/mips/lantiq/falcon/sysctrl.c 	clk->cl.clk = clk;
cl                180 arch/mips/lantiq/falcon/sysctrl.c 	clkdev_add(&clk->cl);
cl                125 arch/mips/lantiq/xway/gptu.c 	clk->cl.dev_id = dev_name(dev);
cl                126 arch/mips/lantiq/xway/gptu.c 	clk->cl.con_id = con;
cl                127 arch/mips/lantiq/xway/gptu.c 	clk->cl.clk = clk;
cl                131 arch/mips/lantiq/xway/gptu.c 	clkdev_add(&clk->cl);
cl                314 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.dev_id = dev;
cl                315 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.con_id = con;
cl                316 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.clk = clk;
cl                328 arch/mips/lantiq/xway/sysctrl.c 	clkdev_add(&clk->cl);
cl                337 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.dev_id = dev;
cl                338 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.con_id = con;
cl                339 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.clk = clk;
cl                343 arch/mips/lantiq/xway/sysctrl.c 	clkdev_add(&clk->cl);
cl                355 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.dev_id = "17000000.pci";
cl                356 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.con_id = NULL;
cl                357 arch/mips/lantiq/xway/sysctrl.c 	clk->cl.clk = clk;
cl                364 arch/mips/lantiq/xway/sysctrl.c 	clkdev_add(&clk->cl);
cl                367 arch/mips/lantiq/xway/sysctrl.c 	clk_ext->cl.dev_id = "17000000.pci";
cl                368 arch/mips/lantiq/xway/sysctrl.c 	clk_ext->cl.con_id = "external";
cl                369 arch/mips/lantiq/xway/sysctrl.c 	clk_ext->cl.clk = clk_ext;
cl                372 arch/mips/lantiq/xway/sysctrl.c 	clkdev_add(&clk_ext->cl);
cl                395 arch/mips/lantiq/xway/sysctrl.c 		clk->cl.dev_id = "1f103000.cgu";
cl                396 arch/mips/lantiq/xway/sysctrl.c 		clk->cl.con_id = name;
cl                397 arch/mips/lantiq/xway/sysctrl.c 		clk->cl.clk = clk;
cl                402 arch/mips/lantiq/xway/sysctrl.c 		clkdev_add(&clk->cl);
cl                 19 arch/mips/ralink/clk.c 	struct clk_lookup cl;
cl                 30 arch/mips/ralink/clk.c 	clk->cl.dev_id = dev;
cl                 31 arch/mips/ralink/clk.c 	clk->cl.clk = clk;
cl                 35 arch/mips/ralink/clk.c 	clkdev_add(&clk->cl);
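
The lantiq and ralink hits all follow one pattern: cl is a struct
clk_lookup embedded in the platform's own clock structure, filled in with
a dev_id/con_id pair plus the clock itself and then published with
clkdev_add() so clk_get() can resolve it. A minimal sketch using a
hypothetical driver type:

	#include <linux/clkdev.h>

	struct my_clk {
		struct clk_lookup cl;	/* embedded lookup entry */
		struct clk *clk;	/* the clock being published */
	};

	static void my_clkdev_add(struct my_clk *myclk, const char *dev,
				  const char *con)
	{
		myclk->cl.dev_id = dev;	/* e.g. "17000000.pci" */
		myclk->cl.con_id = con;	/* NULL selects the default clock */
		myclk->cl.clk = myclk->clk;
		clkdev_add(&myclk->cl);
	}
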
cl                 29 arch/openrisc/kernel/dma.c 	unsigned long cl;
cl                 41 arch/openrisc/kernel/dma.c 	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
cl                 42 arch/openrisc/kernel/dma.c 		mtspr(SPR_DCBFR, cl);
cl                131 arch/openrisc/kernel/dma.c 	unsigned long cl;
cl                137 arch/openrisc/kernel/dma.c 		for (cl = addr; cl < addr + size;
cl                138 arch/openrisc/kernel/dma.c 		     cl += cpuinfo->dcache_block_size)
cl                139 arch/openrisc/kernel/dma.c 			mtspr(SPR_DCBFR, cl);
cl                143 arch/openrisc/kernel/dma.c 		for (cl = addr; cl < addr + size;
cl                144 arch/openrisc/kernel/dma.c 		     cl += cpuinfo->dcache_block_size)
cl                145 arch/openrisc/kernel/dma.c 			mtspr(SPR_DCBIR, cl);
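
In the OpenRISC DMA code, cl is a cache-line address: each loop steps
through the buffer in dcache_block_size increments and writes the line
address to a cache-control SPR, SPR_DCBFR to flush (write back) or
SPR_DCBIR to invalidate. Reduced sketch (block_size stands in for
cpuinfo->dcache_block_size):

	unsigned long cl;

	/* write back dirty lines before a device reads the buffer */
	for (cl = addr; cl < addr + size; cl += block_size)
		mtspr(SPR_DCBFR, cl);

	/* invalidate stale lines before the CPU reads what a device wrote */
	for (cl = addr; cl < addr + size; cl += block_size)
		mtspr(SPR_DCBIR, cl);
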
cl                 17 arch/powerpc/include/asm/cmpxchg.h #define XCHG_GEN(type, sfx, cl)				\
cl                 37 arch/powerpc/include/asm/cmpxchg.h 	: "cc", cl);						\
cl                 42 arch/powerpc/include/asm/cmpxchg.h #define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
cl                 71 arch/powerpc/include/asm/cmpxchg.h 	: "cc", cl);						\
cl                635 arch/powerpc/math-emu/math_efp.c 			int ch, cl;
cl                644 arch/powerpc/math-emu/math_efp.c 			cl = (IR1 == cmp) ? 1 : 0;
cl                645 arch/powerpc/math-emu/math_efp.c 			IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
cl                646 arch/powerpc/math-emu/math_efp.c 				((ch & cl) << 0);
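
In math_efp.c, ch and cl are single-bit comparison results (cl is set
when IR1 equals the comparison target), packed into a 4-bit condition
field: bit 3 = ch, bit 2 = cl, bit 1 = ch|cl, bit 0 = ch&cl. For
example, ch = 1 and cl = 0 gives IR = 0b1010, while ch = cl = 1 gives
IR = 0b1111.
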
cl                 40 arch/sh/kernel/process_64.c 	unsigned long long ah, al, bh, bl, ch, cl;
cl                 50 arch/sh/kernel/process_64.c 	cl = (regs->regs[15]) & 0xffffffff;
cl                 52 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                 61 arch/sh/kernel/process_64.c         asm volatile ("getcon   " __KCR0 ", %0" : "=r" (cl));
cl                 63 arch/sh/kernel/process_64.c 	cl = (cl) & 0xffffffff;
cl                 65 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                 72 arch/sh/kernel/process_64.c 	cl = (regs->regs[2]) & 0xffffffff;
cl                 74 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                 81 arch/sh/kernel/process_64.c 	cl = (regs->regs[5]) & 0xffffffff;
cl                 83 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                 90 arch/sh/kernel/process_64.c 	cl = (regs->regs[8]) & 0xffffffff;
cl                 92 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                 99 arch/sh/kernel/process_64.c 	cl = (regs->regs[11]) & 0xffffffff;
cl                101 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                108 arch/sh/kernel/process_64.c 	cl = (regs->regs[14]) & 0xffffffff;
cl                110 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                117 arch/sh/kernel/process_64.c 	cl = (regs->regs[19]) & 0xffffffff;
cl                119 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                126 arch/sh/kernel/process_64.c 	cl = (regs->regs[22]) & 0xffffffff;
cl                128 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                135 arch/sh/kernel/process_64.c 	cl = (regs->regs[25]) & 0xffffffff;
cl                137 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                144 arch/sh/kernel/process_64.c 	cl = (regs->regs[28]) & 0xffffffff;
cl                146 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                153 arch/sh/kernel/process_64.c 	cl = (regs->regs[31]) & 0xffffffff;
cl                155 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                162 arch/sh/kernel/process_64.c 	cl = (regs->regs[34]) & 0xffffffff;
cl                164 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                171 arch/sh/kernel/process_64.c 	cl = (regs->regs[37]) & 0xffffffff;
cl                173 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                180 arch/sh/kernel/process_64.c 	cl = (regs->regs[40]) & 0xffffffff;
cl                182 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                189 arch/sh/kernel/process_64.c 	cl = (regs->regs[43]) & 0xffffffff;
cl                191 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                198 arch/sh/kernel/process_64.c 	cl = (regs->regs[46]) & 0xffffffff;
cl                200 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                207 arch/sh/kernel/process_64.c 	cl = (regs->regs[49]) & 0xffffffff;
cl                209 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                216 arch/sh/kernel/process_64.c 	cl = (regs->regs[52]) & 0xffffffff;
cl                218 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                225 arch/sh/kernel/process_64.c 	cl = (regs->regs[55]) & 0xffffffff;
cl                227 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                234 arch/sh/kernel/process_64.c 	cl = (regs->regs[58]) & 0xffffffff;
cl                236 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                243 arch/sh/kernel/process_64.c 	cl = (regs->regs[61]) & 0xffffffff;
cl                245 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                252 arch/sh/kernel/process_64.c 	cl = (regs->tregs[1]) & 0xffffffff;
cl                254 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                261 arch/sh/kernel/process_64.c 	cl = (regs->tregs[4]) & 0xffffffff;
cl                263 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
cl                270 arch/sh/kernel/process_64.c 	cl = (regs->tregs[7]) & 0xffffffff;
cl                272 arch/sh/kernel/process_64.c 	       ah, al, bh, bl, ch, cl);
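
Throughout the SH-5 register dump, each printk line shows three 64-bit
values split into 32-bit halves, and cl is always the low word of the
third one (the matching ch high words are assigned on lines this search
does not show). The split is the usual shift/mask pair, e.g.:

	unsigned long long reg = regs->regs[15];
	unsigned long long ch = (reg >> 32) & 0xffffffff;	/* high word */
	unsigned long long cl = reg & 0xffffffff;		/* low word */
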
cl                676 arch/sparc/kernel/setup_64.c 		u32 cl, sv, gw;
cl                678 arch/sparc/kernel/setup_64.c 		cl = prom_getintdefault (chosen, "client-ip", 0);
cl                681 arch/sparc/kernel/setup_64.c 		if (cl && sv) {
cl                682 arch/sparc/kernel/setup_64.c 			ic_myaddr = cl;
cl                126 arch/um/drivers/ubd_kern.c 					 .cl = 1 })
cl                129 arch/um/drivers/ubd_kern.c 					 .cl = 1 })
cl                 68 arch/um/include/shared/os.h 	unsigned int cl : 1;    /* FD_CLOEXEC */
cl                 72 arch/um/include/shared/os.h 					  .t = 0, .a = 0, .e = 0, .cl = 0 })
cl                130 arch/um/include/shared/os.h 	flags.cl = 1;
cl                203 arch/um/os-Linux/file.c 	if (flags.cl && fcntl(fd, F_SETFD, 1)) {
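
In UML, cl is a one-bit flag in struct openflags mirroring FD_CLOEXEC;
os-Linux/file.c applies it with fcntl(fd, F_SETFD, 1) as shown above,
where the literal 1 is FD_CLOEXEC. In plain libc terms the check is:

	if (flags.cl && fcntl(fd, F_SETFD, FD_CLOEXEC) < 0)
		return -errno;	/* sketch; error handling elided */
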
cl                262 arch/x86/boot/boot.h 			u8 cl, ch, ecx2, ecx3;
cl                112 arch/x86/boot/edd.c 		ei->legacy_max_cylinder = oreg.ch + ((oreg.cl & 0xc0) << 2);
cl                114 arch/x86/boot/edd.c 		ei->legacy_sectors_per_track = oreg.cl & 0x3f;
cl                 36 arch/x86/boot/video.c 	if ((oreg.ch & 0x1f) > (oreg.cl & 0x1f))
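
In the x86 boot code, cl is the BIOS CL register from an INT 13h
geometry query: its top two bits are bits 9:8 of the 10-bit cylinder
number (the low eight bits live in CH) and its low six bits are the
sectors per track, which is exactly what the edd.c lines above unpack.
Worked decode with sample register values:

	unsigned char ch = 0xfe, cl = 0xff;
	unsigned int max_cyl = ch + ((cl & 0xc0) << 2);	/* 0xfe + 0x300 = 1022 */
	unsigned int spt     = cl & 0x3f;		/* 63 */
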
cl                 66 arch/x86/include/asm/asm.h #define _ASM_ARG3B	cl
cl                102 arch/x86/include/asm/asm.h #define _ASM_ARG4B	cl
cl                731 arch/x86/kernel/cpu/resctrl/core.c 	int cl;
cl                734 arch/x86/kernel/cpu/resctrl/core.c 		cl = strlen(r->name);
cl                735 arch/x86/kernel/cpu/resctrl/core.c 		if (cl > max_name_width)
cl                736 arch/x86/kernel/cpu/resctrl/core.c 			max_name_width = cl;
cl                367 arch/x86/kvm/emulate.c 	FOP1E(op, cl) \
cl                376 arch/x86/kvm/emulate.c 	FOP1EEX(op, cl) \
cl                407 arch/x86/kvm/emulate.c 	FOP2E(op##b, al, cl) \
cl                408 arch/x86/kvm/emulate.c 	FOP2E(op##w, ax, cl) \
cl                409 arch/x86/kvm/emulate.c 	FOP2E(op##l, eax, cl) \
cl                410 arch/x86/kvm/emulate.c 	ON64(FOP2E(op##q, rax, cl)) \
cl                431 arch/x86/kvm/emulate.c 	FOP3E(op##w, ax, dx, cl) \
cl                432 arch/x86/kvm/emulate.c 	FOP3E(op##l, eax, edx, cl) \
cl                433 arch/x86/kvm/emulate.c 	ON64(FOP3E(op##q, rax, rdx, cl)) \
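
In the KVM emulator's fastop tables, cl is the hardware register: x86
variable shifts and rotates require their count in CL, so the two- and
three-operand thunks are instantiated with cl as the count operand for
every width. A standalone illustration of that constraint (hypothetical
helper, not kernel code):

	/* the "c" constraint pins the count to rcx, so the shift
	 * count ends up in CL as the ISA requires */
	static inline unsigned int shl32(unsigned int v, unsigned char n)
	{
		asm("shll %%cl, %0" : "+r" (v) : "c" (n));
		return v;
	}
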
cl                406 crypto/vmac.c  	u64 cl = dctx->polytmp[1];
cl                413 crypto/vmac.c  		ADD128(ch, cl, rh, rl);
cl                421 crypto/vmac.c  		poly_step(ch, cl, pkh, pkl, rh, rl);
cl                426 crypto/vmac.c  	dctx->polytmp[1] = cl;
cl                541 crypto/vmac.c  	u64 cl = dctx->polytmp[1];
cl                553 crypto/vmac.c  			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
cl                556 crypto/vmac.c  			ADD128(ch, cl, rh, rl);
cl                560 crypto/vmac.c  	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
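
In VMAC, ch/cl are the high and low halves of a 128-bit polynomial-hash
accumulator kept in dctx->polytmp[]; ADD128 folds another 128-bit value
in with carry propagation and poly_step advances the polynomial state. A
self-contained sketch of such a 128-bit add (the kernel's macro differs
in form):

	#include <stdint.h>

	/* carry out of the low u64 increments the high u64 */
	static inline void add128(uint64_t *ch, uint64_t *cl,
				  uint64_t rh, uint64_t rl)
	{
		*cl += rl;
		if (*cl < rl)		/* unsigned wraparound == carry */
			(*ch)++;
		*ch += rh;
	}
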
cl                343 drivers/acpi/cppc_acpi.c static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
cl               1596 drivers/atm/idt77252.c 	unsigned long cl, avail;
cl               1615 drivers/atm/idt77252.c 	cl = avail;
cl               1626 drivers/atm/idt77252.c 		if ((cl >= avail) && (card->soft_tst[e].vc == NULL)) {
cl               1640 drivers/atm/idt77252.c 			cl -= card->tst_size;
cl               1646 drivers/atm/idt77252.c 		cl += n;
cl               1576 drivers/atm/nicstar.c 	unsigned long cl;
cl               1598 drivers/atm/nicstar.c 	cl = NS_TST_NUM_ENTRIES;
cl               1602 drivers/atm/nicstar.c 		if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
cl               1605 drivers/atm/nicstar.c 			cl -= NS_TST_NUM_ENTRIES;
cl               1612 drivers/atm/nicstar.c 		cl += n;
cl                213 drivers/auxdisplay/panel.c 		int cl;
cl                932 drivers/auxdisplay/panel.c 		lcd.pins.cl = PIN_STROBE;
cl                994 drivers/auxdisplay/panel.c 		lcd.pins.cl = lcd_cl_pin;
cl               1013 drivers/auxdisplay/panel.c 		if (lcd.pins.cl == PIN_NOT_SET)
cl               1014 drivers/auxdisplay/panel.c 			lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
cl               1042 drivers/auxdisplay/panel.c 	if (lcd.pins.cl == PIN_NOT_SET)
cl               1043 drivers/auxdisplay/panel.c 		lcd.pins.cl = PIN_NONE;
cl               1063 drivers/auxdisplay/panel.c 	pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
cl               1742 drivers/auxdisplay/panel.c 		lcd.pins.cl = lcd_cl_pin;
cl                191 drivers/bus/ti-sysc.c 	struct clk_lookup *cl;
cl                221 drivers/bus/ti-sysc.c 	cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
cl                222 drivers/bus/ti-sysc.c 	if (!cl)
cl                225 drivers/bus/ti-sysc.c 	cl->con_id = n;
cl                226 drivers/bus/ti-sysc.c 	cl->dev_id = dev_name(ddata->dev);
cl                227 drivers/bus/ti-sysc.c 	cl->clk = clock;
cl                228 drivers/bus/ti-sysc.c 	clkdev_add(cl);
cl                 38 drivers/clk/clkdev.c 	struct clk_lookup *p, *cl = NULL;
cl                 62 drivers/clk/clkdev.c 			cl = p;
cl                 69 drivers/clk/clkdev.c 	return cl;
cl                 74 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                 78 drivers/clk/clkdev.c 	cl = clk_find(dev_id, con_id);
cl                 79 drivers/clk/clkdev.c 	if (cl)
cl                 80 drivers/clk/clkdev.c 		hw = cl->clk_hw;
cl                121 drivers/clk/clkdev.c static void __clkdev_add(struct clk_lookup *cl)
cl                124 drivers/clk/clkdev.c 	list_add_tail(&cl->node, &clocks);
cl                128 drivers/clk/clkdev.c void clkdev_add(struct clk_lookup *cl)
cl                130 drivers/clk/clkdev.c 	if (!cl->clk_hw)
cl                131 drivers/clk/clkdev.c 		cl->clk_hw = __clk_get_hw(cl->clk);
cl                132 drivers/clk/clkdev.c 	__clkdev_add(cl);
cl                136 drivers/clk/clkdev.c void clkdev_add_table(struct clk_lookup *cl, size_t num)
cl                140 drivers/clk/clkdev.c 		cl->clk_hw = __clk_get_hw(cl->clk);
cl                141 drivers/clk/clkdev.c 		list_add_tail(&cl->node, &clocks);
cl                142 drivers/clk/clkdev.c 		cl++;
cl                151 drivers/clk/clkdev.c 	struct clk_lookup cl;
cl                166 drivers/clk/clkdev.c 	cla->cl.clk_hw = hw;
cl                169 drivers/clk/clkdev.c 		cla->cl.con_id = cla->con_id;
cl                174 drivers/clk/clkdev.c 		cla->cl.dev_id = cla->dev_id;
cl                177 drivers/clk/clkdev.c 	return &cla->cl;
cl                184 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                186 drivers/clk/clkdev.c 	cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
cl                187 drivers/clk/clkdev.c 	if (cl)
cl                188 drivers/clk/clkdev.c 		__clkdev_add(cl);
cl                190 drivers/clk/clkdev.c 	return cl;
cl                196 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                200 drivers/clk/clkdev.c 	cl = vclkdev_alloc(__clk_get_hw(clk), con_id, dev_fmt, ap);
cl                203 drivers/clk/clkdev.c 	return cl;
cl                210 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                214 drivers/clk/clkdev.c 	cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
cl                217 drivers/clk/clkdev.c 	return cl;
cl                233 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                237 drivers/clk/clkdev.c 	cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
cl                240 drivers/clk/clkdev.c 	return cl;
cl                256 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                260 drivers/clk/clkdev.c 	cl = vclkdev_create(hw, con_id, dev_fmt, ap);
cl                263 drivers/clk/clkdev.c 	return cl;
cl                287 drivers/clk/clkdev.c void clkdev_drop(struct clk_lookup *cl)
cl                290 drivers/clk/clkdev.c 	list_del(&cl->node);
cl                292 drivers/clk/clkdev.c 	kfree(cl);
cl                300 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                304 drivers/clk/clkdev.c 	cl = vclkdev_create(hw, con_id, dev_id, ap);
cl                307 drivers/clk/clkdev.c 	return cl;
cl                311 drivers/clk/clkdev.c 	struct clk_lookup **cl, const char *con_id, const char *dev_id)
cl                320 drivers/clk/clkdev.c 		*cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
cl                322 drivers/clk/clkdev.c 		*cl = __clk_register_clkdev(hw, con_id, NULL);
cl                324 drivers/clk/clkdev.c 	return *cl ? 0 : -ENOMEM;
cl                344 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                349 drivers/clk/clkdev.c 	return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id,
cl                371 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                373 drivers/clk/clkdev.c 	return do_clk_register_clkdev(hw, &cl, con_id, dev_id);
cl                402 drivers/clk/clkdev.c 	struct clk_lookup *cl;
cl                406 drivers/clk/clkdev.c 	cl = clk_find(dev_id, con_id);
cl                409 drivers/clk/clkdev.c 	WARN_ON(!cl);
cl                411 drivers/clk/clkdev.c 			      devm_clk_match_clkdev, cl);
cl                435 drivers/clk/clkdev.c 	struct clk_lookup **cl;
cl                437 drivers/clk/clkdev.c 	cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
cl                438 drivers/clk/clkdev.c 	if (cl) {
cl                439 drivers/clk/clkdev.c 		rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
cl                441 drivers/clk/clkdev.c 			devres_add(dev, cl);
cl                443 drivers/clk/clkdev.c 			devres_free(cl);
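
drivers/clk/clkdev.c is the provider side of the lookups used above:
clk_find() scans the global list for the best dev_id/con_id match,
clkdev_add()/clkdev_create() publish entries, clkdev_drop() removes and
frees one, and the devm_ variants tie registration to a device lifetime.
A hedged usage sketch (the ids are hypothetical):

	#include <linux/clk.h>
	#include <linux/clkdev.h>

	static int example_publish(struct clk *myclk)
	{
		struct clk_lookup *cl;

		cl = clkdev_create(myclk, "baud", "%s", "10000000.serial");
		if (!cl)
			return -ENOMEM;

		/* a driver for 10000000.serial can now call
		 * clk_get(dev, "baud") and get myclk back */

		clkdev_drop(cl);	/* unpublish and free */
		return 0;
	}
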
cl                 42 drivers/clk/hisilicon/clk-hi3660-stub.c 	struct mbox_client cl;
cl                 88 drivers/clk/hisilicon/clk-hi3660-stub.c 	dev_dbg(stub_clk_chan.cl.dev, "set rate msg[0]=0x%x msg[1]=0x%x\n",
cl                132 drivers/clk/hisilicon/clk-hi3660-stub.c 	stub_clk_chan.cl.dev = dev;
cl                133 drivers/clk/hisilicon/clk-hi3660-stub.c 	stub_clk_chan.cl.tx_done = NULL;
cl                134 drivers/clk/hisilicon/clk-hi3660-stub.c 	stub_clk_chan.cl.tx_block = false;
cl                135 drivers/clk/hisilicon/clk-hi3660-stub.c 	stub_clk_chan.cl.knows_txdone = false;
cl                138 drivers/clk/hisilicon/clk-hi3660-stub.c 	stub_clk_chan.mbox = mbox_request_channel(&stub_clk_chan.cl, 0);
cl                 50 drivers/clk/hisilicon/clk-hi6220-stub.c 	struct mbox_client cl;
cl                218 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->cl.dev = dev;
cl                219 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->cl.tx_done = NULL;
cl                220 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->cl.tx_block = true;
cl                221 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->cl.tx_tout = 500;
cl                222 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->cl.knows_txdone = false;
cl                225 drivers/clk/hisilicon/clk-hi6220-stub.c 	stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0);
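
From here on, most hits are struct mbox_client cl, the mailbox
framework's client descriptor. The recurring setup seen in the hi3660
and hi6220 stub-clock drivers (and in the firmware drivers below) fills
in dev, the callbacks, and the tx policy, then requests a channel.
Minimal sketch with a hypothetical context struct:

	#include <linux/mailbox_client.h>

	struct my_ctx {
		struct mbox_client cl;	/* embedded for container_of() */
		struct mbox_chan *chan;
	};

	static void my_rx(struct mbox_client *cl, void *msg)
	{
		struct my_ctx *ctx = container_of(cl, struct my_ctx, cl);
		/* handle *msg for ctx ... */
	}

	static int my_setup(struct device *dev, struct my_ctx *ctx)
	{
		ctx->cl.dev = dev;
		ctx->cl.rx_callback = my_rx;
		ctx->cl.tx_block = true;	/* mbox_send_message() blocks */
		ctx->cl.tx_tout = 500;		/* ms */
		ctx->cl.knows_txdone = false;

		ctx->chan = mbox_request_channel(&ctx->cl, 0);
		return PTR_ERR_OR_ZERO(ctx->chan);
	}
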
cl                153 drivers/clk/ti/adpll.c 	struct clk_lookup *cl;
cl                217 drivers/clk/ti/adpll.c 	struct clk_lookup *cl;
cl                231 drivers/clk/ti/adpll.c 		cl = clkdev_create(clock, con_id, NULL);
cl                232 drivers/clk/ti/adpll.c 		if (!cl)
cl                234 drivers/clk/ti/adpll.c 		d->clocks[index].cl = cl;
cl                784 drivers/clk/ti/adpll.c 		if (ac->cl)
cl                785 drivers/clk/ti/adpll.c 			clkdev_drop(ac->cl);
cl                479 drivers/clk/ti/clk.c 	struct clk_lookup *cl;
cl                487 drivers/clk/ti/clk.c 	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
cl                488 drivers/clk/ti/clk.c 	if (!cl)
cl                492 drivers/clk/ti/clk.c 		cl->dev_id = dev_name(dev);
cl                493 drivers/clk/ti/clk.c 	cl->con_id = con;
cl                494 drivers/clk/ti/clk.c 	cl->clk = clk;
cl                496 drivers/clk/ti/clk.c 	clkdev_add(cl);
cl               1662 drivers/crypto/bcm/cipher.c static void spu_rx_callback(struct mbox_client *cl, void *msg)
cl               1442 drivers/dma/bcm-sba-raid.c static void sba_receive_message(struct mbox_client *cl, void *msg)
cl                105 drivers/firmware/arm_scmi/driver.c 	struct mbox_client cl;
cl                141 drivers/firmware/arm_scmi/driver.c #define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
cl                244 drivers/firmware/arm_scmi/driver.c static void scmi_tx_prepare(struct mbox_client *cl, void *m)
cl                247 drivers/firmware/arm_scmi/driver.c 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
cl                346 drivers/firmware/arm_scmi/driver.c static void scmi_rx_callback(struct mbox_client *cl, void *m)
cl                352 drivers/firmware/arm_scmi/driver.c 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
cl                730 drivers/firmware/arm_scmi/driver.c 	struct mbox_client *cl;
cl                751 drivers/firmware/arm_scmi/driver.c 	cl = &cinfo->cl;
cl                752 drivers/firmware/arm_scmi/driver.c 	cl->dev = dev;
cl                753 drivers/firmware/arm_scmi/driver.c 	cl->rx_callback = scmi_rx_callback;
cl                754 drivers/firmware/arm_scmi/driver.c 	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
cl                755 drivers/firmware/arm_scmi/driver.c 	cl->tx_block = false;
cl                756 drivers/firmware/arm_scmi/driver.c 	cl->knows_txdone = tx;
cl                773 drivers/firmware/arm_scmi/driver.c 	cinfo->chan = mbox_request_channel(cl, idx);
cl                233 drivers/firmware/arm_scpi.c 	struct mbox_client cl;
cl                411 drivers/firmware/arm_scpi.c 	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
cl                425 drivers/firmware/arm_scpi.c 	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
cl                933 drivers/firmware/arm_scpi.c 		struct mbox_client *cl = &pchan->cl;
cl                951 drivers/firmware/arm_scpi.c 		cl->dev = dev;
cl                952 drivers/firmware/arm_scpi.c 		cl->rx_callback = scpi_handle_remote_msg;
cl                953 drivers/firmware/arm_scpi.c 		cl->tx_prepare = scpi_tx_prepare;
cl                954 drivers/firmware/arm_scpi.c 		cl->tx_block = true;
cl                955 drivers/firmware/arm_scpi.c 		cl->tx_tout = 20;
cl                956 drivers/firmware/arm_scpi.c 		cl->knows_txdone = false; /* controller can't ack */
cl                965 drivers/firmware/arm_scpi.c 			pchan->chan = mbox_request_channel(cl, idx);
cl                 53 drivers/firmware/imx/imx-dsp.c 	struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
cl                 68 drivers/firmware/imx/imx-dsp.c 	struct mbox_client *cl;
cl                 89 drivers/firmware/imx/imx-dsp.c 		cl = &dsp_chan->cl;
cl                 90 drivers/firmware/imx/imx-dsp.c 		cl->dev = dev;
cl                 91 drivers/firmware/imx/imx-dsp.c 		cl->tx_block = false;
cl                 92 drivers/firmware/imx/imx-dsp.c 		cl->knows_txdone = true;
cl                 93 drivers/firmware/imx/imx-dsp.c 		cl->rx_callback = imx_dsp_handle_rx;
cl                 97 drivers/firmware/imx/imx-dsp.c 		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
cl                133 drivers/firmware/imx/imx-scu-irq.c 	struct mbox_client *cl;
cl                141 drivers/firmware/imx/imx-scu-irq.c 	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
cl                142 drivers/firmware/imx/imx-scu-irq.c 	if (!cl)
cl                145 drivers/firmware/imx/imx-scu-irq.c 	cl->dev = dev;
cl                146 drivers/firmware/imx/imx-scu-irq.c 	cl->rx_callback = imx_scu_irq_callback;
cl                149 drivers/firmware/imx/imx-scu-irq.c 	ch = mbox_request_channel_byname(cl, "gip3");
cl                153 drivers/firmware/imx/imx-scu-irq.c 		devm_kfree(dev, cl);
cl                 29 drivers/firmware/imx/imx-scu.c 	struct mbox_client cl;
cl                106 drivers/firmware/imx/imx-scu.c static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
cl                108 drivers/firmware/imx/imx-scu.c 	struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
cl                115 drivers/firmware/imx/imx-scu.c 	struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
cl                252 drivers/firmware/imx/imx-scu.c 	struct mbox_client *cl;
cl                282 drivers/firmware/imx/imx-scu.c 		cl = &sc_chan->cl;
cl                283 drivers/firmware/imx/imx-scu.c 		cl->dev = dev;
cl                284 drivers/firmware/imx/imx-scu.c 		cl->tx_block = false;
cl                285 drivers/firmware/imx/imx-scu.c 		cl->knows_txdone = true;
cl                286 drivers/firmware/imx/imx-scu.c 		cl->rx_callback = imx_scu_rx_callback;
cl                290 drivers/firmware/imx/imx-scu.c 			cl->tx_done = imx_scu_tx_done;
cl                297 drivers/firmware/imx/imx-scu.c 		sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
cl                 26 drivers/firmware/raspberrypi.c 	struct mbox_client cl;
cl                 34 drivers/firmware/raspberrypi.c static void response_callback(struct mbox_client *cl, void *msg)
cl                 36 drivers/firmware/raspberrypi.c 	struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
cl                 63 drivers/firmware/raspberrypi.c 		dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
cl                 96 drivers/firmware/raspberrypi.c 	buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
cl                120 drivers/firmware/raspberrypi.c 		dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
cl                125 drivers/firmware/raspberrypi.c 	dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
cl                190 drivers/firmware/raspberrypi.c 		dev_info(fw->cl.dev,
cl                226 drivers/firmware/raspberrypi.c 	fw->cl.dev = dev;
cl                227 drivers/firmware/raspberrypi.c 	fw->cl.rx_callback = response_callback;
cl                228 drivers/firmware/raspberrypi.c 	fw->cl.tx_block = true;
cl                230 drivers/firmware/raspberrypi.c 	fw->chan = mbox_request_channel(&fw->cl, 0);
cl                127 drivers/firmware/ti_sci.c 	struct mbox_client cl;
cl                138 drivers/firmware/ti_sci.c #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
cl                260 drivers/firmware/ti_sci.c static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
cl                262 drivers/firmware/ti_sci.c 	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
cl               3394 drivers/firmware/ti_sci.c 	struct mbox_client *cl;
cl               3472 drivers/firmware/ti_sci.c 	cl = &info->cl;
cl               3473 drivers/firmware/ti_sci.c 	cl->dev = dev;
cl               3474 drivers/firmware/ti_sci.c 	cl->tx_block = false;
cl               3475 drivers/firmware/ti_sci.c 	cl->rx_callback = ti_sci_rx_callback;
cl               3476 drivers/firmware/ti_sci.c 	cl->knows_txdone = true;
cl               3481 drivers/firmware/ti_sci.c 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
cl               3487 drivers/firmware/ti_sci.c 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
cl                156 drivers/firmware/turris-mox-rwtm.c static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
cl                158 drivers/firmware/turris-mox-rwtm.c 	struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
cl                 93 drivers/gpu/drm/bridge/parade-ps8622.c 	struct i2c_client *cl = ps8622->client;
cl                 97 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x02, 0xa1, 0x01);
cl                102 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x14, 0x01);
cl                107 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0xe3, 0x20);
cl                112 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0xe2, 0x80);
cl                120 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x8a, 0x0c);
cl                125 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x89, 0x08);
cl                130 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x71, 0x2d);
cl                135 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x7d, 0x07);
cl                140 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x7b, 0x00);
cl                145 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
cl                150 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0xc0, 0x12);
cl                155 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0xc1, 0x92);
cl                160 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
cl                165 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x32, 0x80);
cl                170 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x00, 0xb0);
cl                175 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x15, 0x40);
cl                180 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x54, 0x10);
cl                186 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
cl                191 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
cl                195 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0x52, 0x20);
cl                200 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0xf1, 0x03);
cl                204 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0x62, 0x41);
cl                209 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0xf6, 0x01);
cl                214 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0x77, 0x06);
cl                219 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x00, 0x4c, 0x04);
cl                224 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc0, 0x00);
cl                229 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
cl                234 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
cl                239 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc3, 0x44);
cl                244 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc4, 0x32);
cl                249 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc5, 0x53);
cl                254 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
cl                259 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc7, 0x56);
cl                264 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xc8, 0x35);
cl                269 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xca, 0x01);
cl                274 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xcb, 0x05);
cl                281 drivers/gpu/drm/bridge/parade-ps8622.c 		err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
cl                286 drivers/gpu/drm/bridge/parade-ps8622.c 		err = ps8622_set(cl, 0x01, 0xa7,
cl                292 drivers/gpu/drm/bridge/parade-ps8622.c 		err = ps8622_set(cl, 0x01, 0xa5, 0x80);
cl                298 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x01, 0xcc, 0x13);
cl                303 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x02, 0xb1, 0x20);
cl                308 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x10, 0x16);
cl                314 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x59, 0x60);
cl                319 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x04, 0x54, 0x14);
cl                324 drivers/gpu/drm/bridge/parade-ps8622.c 	err = ps8622_set(cl, 0x02, 0xa1, 0x91);
cl                485 drivers/gpu/drm/drm_lease.c 	struct drm_mode_create_lease *cl = data;
cl                508 drivers/gpu/drm/drm_lease.c 	if (cl->object_count == 0) {
cl                513 drivers/gpu/drm/drm_lease.c 	if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
cl                518 drivers/gpu/drm/drm_lease.c 	object_count = cl->object_count;
cl                520 drivers/gpu/drm/drm_lease.c 	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
cl                538 drivers/gpu/drm/drm_lease.c 	fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
cl                571 drivers/gpu/drm/drm_lease.c 	cl->fd = fd;
cl                572 drivers/gpu/drm/drm_lease.c 	cl->lessee_id = lessee->lessee_id;
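
In drm_lease.c, cl is the ioctl payload, struct drm_mode_create_lease:
the handler rejects flags outside O_CLOEXEC|O_NONBLOCK, copies the
object id array in with memdup_user(), and returns the lessee id plus a
new fd through the same struct. Userspace-side sketch (crtc_id/conn_id
are assumed to come from prior mode enumeration):

	#include <stdint.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	int lease_objects(int drm_fd, uint32_t crtc_id, uint32_t conn_id)
	{
		uint32_t objects[2] = { crtc_id, conn_id };
		struct drm_mode_create_lease cl = {
			.object_ids   = (uintptr_t)objects,
			.object_count = 2,
			.flags        = O_CLOEXEC,
		};

		if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_LEASE, &cl) < 0)
			return -1;
		return cl.fd;	/* cl.lessee_id identifies the lease */
	}
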
cl                127 drivers/gpu/drm/i915/gt/intel_timeline.c static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
cl                129 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
cl                131 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
cl                132 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_put(cl->hwsp->vma);
cl                133 drivers/gpu/drm/i915/gt/intel_timeline.c 	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
cl                135 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_active_fini(&cl->active);
cl                136 drivers/gpu/drm/i915/gt/intel_timeline.c 	kfree(cl);
cl                141 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct intel_timeline_cacheline *cl =
cl                142 drivers/gpu/drm/i915/gt/intel_timeline.c 		container_of(active, typeof(*cl), active);
cl                144 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_vma_unpin(cl->hwsp->vma);
cl                145 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
cl                146 drivers/gpu/drm/i915/gt/intel_timeline.c 		__idle_cacheline_free(cl);
cl                151 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct intel_timeline_cacheline *cl =
cl                152 drivers/gpu/drm/i915/gt/intel_timeline.c 		container_of(active, typeof(*cl), active);
cl                154 drivers/gpu/drm/i915/gt/intel_timeline.c 	__i915_vma_pin(cl->hwsp->vma);
cl                161 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct intel_timeline_cacheline *cl;
cl                166 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
cl                167 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (!cl)
cl                172 drivers/gpu/drm/i915/gt/intel_timeline.c 		kfree(cl);
cl                177 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl->hwsp = hwsp;
cl                178 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl->vaddr = page_pack_bits(vaddr, cacheline);
cl                180 drivers/gpu/drm/i915/gt/intel_timeline.c 	i915_active_init(hwsp->gt->i915, &cl->active,
cl                183 drivers/gpu/drm/i915/gt/intel_timeline.c 	return cl;
cl                186 drivers/gpu/drm/i915/gt/intel_timeline.c static void cacheline_acquire(struct intel_timeline_cacheline *cl)
cl                188 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (cl)
cl                189 drivers/gpu/drm/i915/gt/intel_timeline.c 		i915_active_acquire(&cl->active);
cl                192 drivers/gpu/drm/i915/gt/intel_timeline.c static void cacheline_release(struct intel_timeline_cacheline *cl)
cl                194 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (cl)
cl                195 drivers/gpu/drm/i915/gt/intel_timeline.c 		i915_active_release(&cl->active);
cl                198 drivers/gpu/drm/i915/gt/intel_timeline.c static void cacheline_free(struct intel_timeline_cacheline *cl)
cl                200 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl                201 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
cl                203 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (i915_active_is_idle(&cl->active))
cl                204 drivers/gpu/drm/i915/gt/intel_timeline.c 		__idle_cacheline_free(cl);
cl                222 drivers/gpu/drm/i915/gt/intel_timeline.c 		struct intel_timeline_cacheline *cl;
cl                229 drivers/gpu/drm/i915/gt/intel_timeline.c 		cl = cacheline_alloc(hwsp->private, cacheline);
cl                230 drivers/gpu/drm/i915/gt/intel_timeline.c 		if (IS_ERR(cl)) {
cl                232 drivers/gpu/drm/i915/gt/intel_timeline.c 			return PTR_ERR(cl);
cl                235 drivers/gpu/drm/i915/gt/intel_timeline.c 		timeline->hwsp_cacheline = cl;
cl                238 drivers/gpu/drm/i915/gt/intel_timeline.c 		vaddr = page_mask_bits(cl->vaddr);
cl                395 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct intel_timeline_cacheline *cl;
cl                432 drivers/gpu/drm/i915/gt/intel_timeline.c 	cl = cacheline_alloc(vma->private, cacheline);
cl                433 drivers/gpu/drm/i915/gt/intel_timeline.c 	if (IS_ERR(cl)) {
cl                434 drivers/gpu/drm/i915/gt/intel_timeline.c 		err = PTR_ERR(cl);
cl                438 drivers/gpu/drm/i915/gt/intel_timeline.c 	GEM_BUG_ON(cl->hwsp->vma != vma);
cl                457 drivers/gpu/drm/i915/gt/intel_timeline.c 	vaddr = page_mask_bits(cl->vaddr);
cl                464 drivers/gpu/drm/i915/gt/intel_timeline.c 	cacheline_acquire(cl);
cl                465 drivers/gpu/drm/i915/gt/intel_timeline.c 	tl->hwsp_cacheline = cl;
cl                472 drivers/gpu/drm/i915/gt/intel_timeline.c 	cacheline_free(cl);
cl                493 drivers/gpu/drm/i915/gt/intel_timeline.c static int cacheline_ref(struct intel_timeline_cacheline *cl,
cl                496 drivers/gpu/drm/i915/gt/intel_timeline.c 	return i915_active_ref(&cl->active, rq->timeline, rq);
cl                503 drivers/gpu/drm/i915/gt/intel_timeline.c 	struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
cl                512 drivers/gpu/drm/i915/gt/intel_timeline.c 		err = cacheline_ref(cl, to);
cl                514 drivers/gpu/drm/i915/gt/intel_timeline.c 		if (likely(cl == tl->hwsp_cacheline)) {
cl                517 drivers/gpu/drm/i915/gt/intel_timeline.c 			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
cl                518 drivers/gpu/drm/i915/gt/intel_timeline.c 				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
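
The i915 timeline code stores per-cacheline state in cl->vaddr as a
tagged pointer: page_pack_bits() folds a cacheline index into the low
alignment bits, and ptr_set_bit()/ptr_test_bit() add a CACHELINE_FREE
flag. Generic sketch of the trick, assuming a sufficiently aligned
pointer (hypothetical helpers, not the i915 macros):

	#include <stdint.h>

	#define LOW_BITS 6	/* pointer assumed 64-byte aligned */
	#define LOW_MASK (((uintptr_t)1 << LOW_BITS) - 1)

	static inline void *pack_ptr(void *p, unsigned int bits)
	{
		return (void *)((uintptr_t)p | (bits & LOW_MASK));
	}

	static inline void *ptr_only(void *p)
	{
		return (void *)((uintptr_t)p & ~LOW_MASK);
	}

	static inline unsigned int bits_only(void *p)
	{
		return (uintptr_t)p & LOW_MASK;
	}
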
cl                142 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 	const struct nvkm_enum *en, *re, *cl, *sc;
cl                176 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 	cl = nvkm_enum_find(vm_client, st2);
cl                177 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 	if      (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
cl                190 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
cl                363 drivers/gpu/host1x/bus.c 	struct host1x_client *client, *cl;
cl                398 drivers/gpu/host1x/bus.c 	list_for_each_entry_safe(client, cl, &device->clients, list)
cl                702 drivers/hid/intel-ish-hid/ishtp/bus.c int ishtp_cl_device_bind(struct ishtp_cl *cl)
cl                708 drivers/hid/intel-ish-hid/ishtp/bus.c 	if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
cl                712 drivers/hid/intel-ish-hid/ishtp/bus.c 	spin_lock_irqsave(&cl->dev->device_list_lock, flags);
cl                713 drivers/hid/intel-ish-hid/ishtp/bus.c 	list_for_each_entry(cl_device, &cl->dev->device_list,
cl                716 drivers/hid/intel-ish-hid/ishtp/bus.c 		    cl_device->fw_client->client_id == cl->fw_client_id) {
cl                717 drivers/hid/intel-ish-hid/ishtp/bus.c 			cl->device = cl_device;
cl                722 drivers/hid/intel-ish-hid/ishtp/bus.c 	spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
cl                740 drivers/hid/intel-ish-hid/ishtp/bus.c 	struct ishtp_cl	*cl;
cl                744 drivers/hid/intel-ish-hid/ishtp/bus.c 	list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
cl                745 drivers/hid/intel-ish-hid/ishtp/bus.c 		cl->state = ISHTP_CL_DISCONNECTED;
cl                752 drivers/hid/intel-ish-hid/ishtp/bus.c 		wake_up_interruptible(&cl->wait_ctrl_res);
cl                755 drivers/hid/intel-ish-hid/ishtp/bus.c 		ishtp_cl_flush_queues(cl);
cl                758 drivers/hid/intel-ish-hid/ishtp/bus.c 		ishtp_cl_free_rx_ring(cl);
cl                759 drivers/hid/intel-ish-hid/ishtp/bus.c 		ishtp_cl_free_tx_ring(cl);
cl                 50 drivers/hid/intel-ish-hid/ishtp/bus.h int	ishtp_cl_device_bind(struct ishtp_cl *cl);
cl                 19 drivers/hid/intel-ish-hid/ishtp/client-buffers.c int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
cl                 21 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	size_t	len = cl->device->fw_client->props.max_msg_length;
cl                 27 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	for (j = 0; j < cl->rx_ring_size; ++j) {
cl                 28 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = ishtp_io_rb_init(cl);
cl                 36 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		spin_lock_irqsave(&cl->free_list_spinlock, flags);
cl                 37 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_add_tail(&rb->list, &cl->free_rb_list.list);
cl                 38 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                 44 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
cl                 45 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	ishtp_cl_free_rx_ring(cl);
cl                 57 drivers/hid/intel-ish-hid/ishtp/client-buffers.c int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
cl                 59 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	size_t	len = cl->device->fw_client->props.max_msg_length;
cl                 63 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	cl->tx_ring_free_size = 0;
cl                 66 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	for (j = 0; j < cl->tx_ring_size; ++j) {
cl                 79 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
cl                 80 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
cl                 81 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		++cl->tx_ring_free_size;
cl                 82 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
cl                 86 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
cl                 87 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	ishtp_cl_free_tx_ring(cl);
cl                 97 drivers/hid/intel-ish-hid/ishtp/client-buffers.c void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
cl                103 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->free_list_spinlock, flags);
cl                104 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	while (!list_empty(&cl->free_rb_list.list)) {
cl                105 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
cl                111 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                113 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->in_process_spinlock, flags);
cl                114 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	while (!list_empty(&cl->in_process_list.list)) {
cl                115 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rb = list_entry(cl->in_process_list.list.next,
cl                121 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
cl                130 drivers/hid/intel-ish-hid/ishtp/client-buffers.c void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
cl                135 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
cl                137 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	while (!list_empty(&cl->tx_free_list.list)) {
cl                138 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		tx_buf = list_entry(cl->tx_free_list.list.next,
cl                141 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		--cl->tx_ring_free_size;
cl                145 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
cl                147 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
cl                149 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	while (!list_empty(&cl->tx_list.list)) {
cl                150 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		tx_buf = list_entry(cl->tx_list.list.next,
cl                156 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
cl                182 drivers/hid/intel-ish-hid/ishtp/client-buffers.c struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
cl                191 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb->cl = cl;
cl                231 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	struct ishtp_cl *cl;
cl                235 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!rb || !rb->cl)
cl                238 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	cl = rb->cl;
cl                239 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->free_list_spinlock, flags);
cl                240 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	list_add_tail(&rb->list, &cl->free_rb_list.list);
cl                241 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                247 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	if (!cl->out_flow_ctrl_creds)
cl                248 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 		rets = ishtp_cl_read_start(cl);
cl                262 drivers/hid/intel-ish-hid/ishtp/client-buffers.c bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
cl                267 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
cl                268 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	tx_list_empty = list_empty(&cl->tx_list.list);
cl                269 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                283 drivers/hid/intel-ish-hid/ishtp/client-buffers.c struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
cl                288 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
cl                289 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	rb = list_first_entry_or_null(&cl->in_process_list.list,
cl                293 drivers/hid/intel-ish-hid/ishtp/client-buffers.c 	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
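
In ISHTP, cl is a struct ishtp_cl host client. The client-buffers.c
helpers keep per-client rx/tx rings as linked lists guarded by irq-safe
spinlocks, with buffers cycling between a free list and an
in-process/tx list. The recycle step visible above, reduced to a sketch:

	static void recycle_rb(struct ishtp_cl *cl, struct ishtp_cl_rb *rb)
	{
		unsigned long flags;

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
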
cl                 16 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
cl                 21 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
cl                 22 drivers/hid/intel-ish-hid/ishtp/client.c 	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
cl                 23 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
cl                 29 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
cl                 31 drivers/hid/intel-ish-hid/ishtp/client.c 	return cl->tx_ring_free_size;
cl                 41 drivers/hid/intel-ish-hid/ishtp/client.c static void ishtp_read_list_flush(struct ishtp_cl *cl)
cl                 47 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
cl                 48 drivers/hid/intel-ish-hid/ishtp/client.c 	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
cl                 49 drivers/hid/intel-ish-hid/ishtp/client.c 		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
cl                 53 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
cl                 65 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_flush_queues(struct ishtp_cl *cl)
cl                 67 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                 70 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_read_list_flush(cl);
cl                 84 drivers/hid/intel-ish-hid/ishtp/client.c static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
cl                 86 drivers/hid/intel-ish-hid/ishtp/client.c 	memset(cl, 0, sizeof(struct ishtp_cl));
cl                 87 drivers/hid/intel-ish-hid/ishtp/client.c 	init_waitqueue_head(&cl->wait_ctrl_res);
cl                 88 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_init(&cl->free_list_spinlock);
cl                 89 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_init(&cl->in_process_spinlock);
cl                 90 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_init(&cl->tx_list_spinlock);
cl                 91 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_init(&cl->tx_free_list_spinlock);
cl                 92 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_init(&cl->fc_spinlock);
cl                 93 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&cl->link);
cl                 94 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->dev = dev;
cl                 96 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&cl->free_rb_list.list);
cl                 97 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&cl->tx_list.list);
cl                 98 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&cl->tx_free_list.list);
cl                 99 drivers/hid/intel-ish-hid/ishtp/client.c 	INIT_LIST_HEAD(&cl->in_process_list.list);
cl                101 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
cl                102 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
cl                103 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->tx_ring_free_size = cl->tx_ring_size;
cl                106 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_tx_path = CL_TX_PATH_IPC;
cl                107 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_dma_acked = 1;
cl                108 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_dma_addr = NULL;
cl                109 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_ipc_acked = 1;
cl                122 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl *cl;
cl                124 drivers/hid/intel-ish-hid/ishtp/client.c 	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
cl                125 drivers/hid/intel-ish-hid/ishtp/client.c 	if (!cl)
cl                128 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_cl_init(cl, cl_device->ishtp_dev);
cl                129 drivers/hid/intel-ish-hid/ishtp/client.c 	return cl;
cl                139 drivers/hid/intel-ish-hid/ishtp/client.c void	ishtp_cl_free(struct ishtp_cl *cl)
cl                144 drivers/hid/intel-ish-hid/ishtp/client.c 	if (!cl)
cl                147 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                152 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_cl_free_rx_ring(cl);
cl                153 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_cl_free_tx_ring(cl);
cl                154 drivers/hid/intel-ish-hid/ishtp/client.c 	kfree(cl);
cl                170 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_link(struct ishtp_cl *cl)
cl                176 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                179 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                192 drivers/hid/intel-ish-hid/ishtp/client.c 		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
cl                197 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->host_client_id = id;
cl                203 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&cl->link, &dev->cl_list);
cl                205 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->state = ISHTP_CL_INITIALIZING;
cl                221 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_cl_unlink(struct ishtp_cl *cl)
cl                228 drivers/hid/intel-ish-hid/ishtp/client.c 	if (!cl || !cl->dev)
cl                231 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                235 drivers/hid/intel-ish-hid/ishtp/client.c 		clear_bit(cl->host_client_id, dev->host_clients_map);
cl                246 drivers/hid/intel-ish-hid/ishtp/client.c 		if (cl->host_client_id == pos->host_client_id) {
cl                263 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_disconnect(struct ishtp_cl *cl)
cl                268 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                271 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                273 drivers/hid/intel-ish-hid/ishtp/client.c 	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
cl                275 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state != ISHTP_CL_DISCONNECTING) {
cl                280 drivers/hid/intel-ish-hid/ishtp/client.c 	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
cl                282 drivers/hid/intel-ish-hid/ishtp/client.c 		dev_err(&cl->device->dev, "failed to disconnect.\n");
cl                286 drivers/hid/intel-ish-hid/ishtp/client.c 	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
cl                288 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->state == ISHTP_CL_DISCONNECTED),
cl                301 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state == ISHTP_CL_DISCONNECTED) {
cl                318 drivers/hid/intel-ish-hid/ishtp/client.c static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
cl                324 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                327 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                330 drivers/hid/intel-ish-hid/ishtp/client.c 		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
cl                331 drivers/hid/intel-ish-hid/ishtp/client.c 				cl->fw_client_id == pos->fw_client_id) {
cl                351 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_connect(struct ishtp_cl *cl)
cl                356 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                359 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                361 drivers/hid/intel-ish-hid/ishtp/client.c 	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
cl                363 drivers/hid/intel-ish-hid/ishtp/client.c 	if (ishtp_cl_is_other_connecting(cl)) {
cl                368 drivers/hid/intel-ish-hid/ishtp/client.c 	if (ishtp_hbm_cl_connect_req(dev, cl)) {
cl                373 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
cl                375 drivers/hid/intel-ish-hid/ishtp/client.c 				(cl->state == ISHTP_CL_CONNECTED ||
cl                376 drivers/hid/intel-ish-hid/ishtp/client.c 				 cl->state == ISHTP_CL_DISCONNECTED)),
cl                389 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state != ISHTP_CL_CONNECTED) {
cl                395 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = cl->status;
cl                401 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = ishtp_cl_device_bind(cl);
cl                404 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_disconnect(cl);
cl                408 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = ishtp_cl_alloc_rx_ring(cl);
cl                412 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_disconnect(cl);
cl                416 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = ishtp_cl_alloc_tx_ring(cl);
cl                420 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_free_rx_ring(cl);
cl                421 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_disconnect(cl);
cl                426 drivers/hid/intel-ish-hid/ishtp/client.c 	rets = ishtp_cl_read_start(cl);
cl                444 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_read_start(struct ishtp_cl *cl)
cl                453 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                456 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                458 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state != ISHTP_CL_CONNECTED)
cl                464 drivers/hid/intel-ish-hid/ishtp/client.c 	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
cl                466 drivers/hid/intel-ish-hid/ishtp/client.c 		dev_err(&cl->device->dev, "no such fw client %d\n",
cl                467 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->fw_client_id);
cl                472 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->free_list_spinlock, flags);
cl                473 drivers/hid/intel-ish-hid/ishtp/client.c 	if (list_empty(&cl->free_rb_list.list)) {
cl                474 drivers/hid/intel-ish-hid/ishtp/client.c 		dev_warn(&cl->device->dev,
cl                478 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                481 drivers/hid/intel-ish-hid/ishtp/client.c 	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
cl                483 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                485 drivers/hid/intel-ish-hid/ishtp/client.c 	rb->cl = cl;
cl                498 drivers/hid/intel-ish-hid/ishtp/client.c 	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
cl                509 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_lock_irqsave(&cl->free_list_spinlock, flags);
cl                510 drivers/hid/intel-ish-hid/ishtp/client.c 		list_add_tail(&rb->list, &cl->free_rb_list.list);
cl                511 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
cl                528 drivers/hid/intel-ish-hid/ishtp/client.c int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
cl                536 drivers/hid/intel-ish-hid/ishtp/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                539 drivers/hid/intel-ish-hid/ishtp/client.c 	dev = cl->dev;
cl                541 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state != ISHTP_CL_CONNECTED) {
cl                542 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->err_send_msg;
cl                547 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->err_send_msg;
cl                552 drivers/hid/intel-ish-hid/ishtp/client.c 	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
cl                554 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->err_send_msg;
cl                559 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->err_send_msg;
cl                564 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
cl                565 drivers/hid/intel-ish-hid/ishtp/client.c 	if (list_empty(&cl->tx_free_list.list)) {
cl                566 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
cl                568 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->err_send_msg;
cl                572 drivers/hid/intel-ish-hid/ishtp/client.c 	cl_msg = list_first_entry(&cl->tx_free_list.list,
cl                575 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
cl                585 drivers/hid/intel-ish-hid/ishtp/client.c 	--cl->tx_ring_free_size;
cl                587 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
cl                590 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
cl                591 drivers/hid/intel-ish-hid/ishtp/client.c 	have_msg_to_send = !list_empty(&cl->tx_list.list);
cl                592 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&cl_msg->list, &cl->tx_list.list);
cl                593 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                595 drivers/hid/intel-ish-hid/ishtp/client.c 	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
cl                596 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_send_msg(dev, cl);
cl                613 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl	*cl = rb->cl;
cl                615 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->in_process_spinlock, flags);
cl                620 drivers/hid/intel-ish-hid/ishtp/client.c 	schedule_work_flag = list_empty(&cl->in_process_list.list);
cl                621 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&rb->list, &cl->in_process_list.list);
cl                622 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
cl                625 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_bus_rx_event(cl->device);
cl                637 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl	*cl = prm;
cl                640 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_device	*dev = (cl ? cl->dev : NULL);
cl                655 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->state != ISHTP_CL_CONNECTED)
cl                658 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
cl                659 drivers/hid/intel-ish-hid/ishtp/client.c 	if (list_empty(&cl->tx_list.list)) {
cl                660 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                664 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
cl                665 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                669 drivers/hid/intel-ish-hid/ishtp/client.c 	if (!cl->sending) {
cl                670 drivers/hid/intel-ish-hid/ishtp/client.c 		--cl->ishtp_flow_ctrl_creds;
cl                671 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->last_ipc_acked = 0;
cl                672 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->last_tx_path = CL_TX_PATH_IPC;
cl                673 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->sending = 1;
cl                676 drivers/hid/intel-ish-hid/ishtp/client.c 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
cl                678 drivers/hid/intel-ish-hid/ishtp/client.c 	rem = cl_msg->send_buf.size - cl->tx_offs;
cl                680 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_hdr.host_addr = cl->host_client_id;
cl                681 drivers/hid/intel-ish-hid/ishtp/client.c 	ishtp_hdr.fw_addr = cl->fw_client_id;
cl                683 drivers/hid/intel-ish-hid/ishtp/client.c 	pmsg = cl_msg->send_buf.data + cl->tx_offs;
cl                688 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->sending = 0;
cl                690 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                693 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
cl                694 drivers/hid/intel-ish-hid/ishtp/client.c 		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
cl                695 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->tx_ring_free_size;
cl                696 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
cl                700 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                701 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->tx_offs += dev->mtu;
cl                704 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
cl                716 drivers/hid/intel-ish-hid/ishtp/client.c 				  struct ishtp_cl *cl)
cl                719 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
cl                722 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->tx_offs = 0;
cl                723 drivers/hid/intel-ish-hid/ishtp/client.c 	ipc_tx_callback(cl);
cl                724 drivers/hid/intel-ish-hid/ishtp/client.c 	++cl->send_msg_cnt_ipc;
cl                735 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl *cl)
cl                745 drivers/hid/intel-ish-hid/ishtp/client.c 	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
cl                748 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
cl                749 drivers/hid/intel-ish-hid/ishtp/client.c 	if (list_empty(&cl->tx_list.list)) {
cl                750 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                754 drivers/hid/intel-ish-hid/ishtp/client.c 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
cl                759 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                761 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_cl_send_msg_ipc(dev, cl);
cl                766 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
cl                768 drivers/hid/intel-ish-hid/ishtp/client.c 	--cl->ishtp_flow_ctrl_creds;
cl                769 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_dma_acked = 0;
cl                770 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_dma_addr = msg_addr;
cl                771 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->last_tx_path = CL_TX_PATH_DMA;
cl                780 drivers/hid/intel-ish-hid/ishtp/client.c 	dma_xfer.fw_client_id = cl->fw_client_id;
cl                781 drivers/hid/intel-ish-hid/ishtp/client.c 	dma_xfer.host_client_id = cl->host_client_id;
cl                787 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
cl                788 drivers/hid/intel-ish-hid/ishtp/client.c 	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
cl                789 drivers/hid/intel-ish-hid/ishtp/client.c 	++cl->tx_ring_free_size;
cl                790 drivers/hid/intel-ish-hid/ishtp/client.c 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
cl                791 drivers/hid/intel-ish-hid/ishtp/client.c 	++cl->send_msg_cnt_dma;
cl                801 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
cl                804 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_send_msg_dma(dev, cl);
cl                806 drivers/hid/intel-ish-hid/ishtp/client.c 		ishtp_cl_send_msg_ipc(dev, cl);
cl                820 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl *cl;
cl                843 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = rb->cl;
cl                844 drivers/hid/intel-ish-hid/ishtp/client.c 		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
cl                845 drivers/hid/intel-ish-hid/ishtp/client.c 				cl->fw_client_id == ishtp_hdr->fw_addr) ||
cl                846 drivers/hid/intel-ish-hid/ishtp/client.c 				!(cl->state == ISHTP_CL_CONNECTED))
cl                852 drivers/hid/intel-ish-hid/ishtp/client.c 			dev_err(&cl->device->dev,
cl                856 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->status = -ENOMEM;
cl                868 drivers/hid/intel-ish-hid/ishtp/client.c 			dev_err(&cl->device->dev,
cl                874 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->status = -EIO;
cl                884 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->status = 0;
cl                888 drivers/hid/intel-ish-hid/ishtp/client.c 			--cl->out_flow_ctrl_creds;
cl                893 drivers/hid/intel-ish-hid/ishtp/client.c 			spin_lock(&cl->free_list_spinlock);
cl                895 drivers/hid/intel-ish-hid/ishtp/client.c 			if (!list_empty(&cl->free_rb_list.list)) {
cl                896 drivers/hid/intel-ish-hid/ishtp/client.c 				new_rb = list_entry(cl->free_rb_list.list.next,
cl                899 drivers/hid/intel-ish-hid/ishtp/client.c 				spin_unlock(&cl->free_list_spinlock);
cl                900 drivers/hid/intel-ish-hid/ishtp/client.c 				new_rb->cl = cl;
cl                906 drivers/hid/intel-ish-hid/ishtp/client.c 				ishtp_hbm_cl_flow_control_req(dev, cl);
cl                908 drivers/hid/intel-ish-hid/ishtp/client.c 				spin_unlock(&cl->free_list_spinlock);
cl                912 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->recv_msg_num_frags;
cl                932 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = complete_rb->cl;
cl                933 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->ts_rx = ktime_get();
cl                934 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->recv_msg_cnt_ipc;
cl                953 drivers/hid/intel-ish-hid/ishtp/client.c 	struct ishtp_cl *cl;
cl                963 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = rb->cl;
cl                964 drivers/hid/intel-ish-hid/ishtp/client.c 		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
cl                965 drivers/hid/intel-ish-hid/ishtp/client.c 				cl->fw_client_id == hbm->fw_client_id) ||
cl                966 drivers/hid/intel-ish-hid/ishtp/client.c 				!(cl->state == ISHTP_CL_CONNECTED))
cl                974 drivers/hid/intel-ish-hid/ishtp/client.c 			dev_err(&cl->device->dev,
cl                978 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->status = -ENOMEM;
cl                990 drivers/hid/intel-ish-hid/ishtp/client.c 			dev_err(&cl->device->dev,
cl                995 drivers/hid/intel-ish-hid/ishtp/client.c 			cl->status = -EIO;
cl               1004 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->status = 0;
cl               1008 drivers/hid/intel-ish-hid/ishtp/client.c 		--cl->out_flow_ctrl_creds;
cl               1013 drivers/hid/intel-ish-hid/ishtp/client.c 		spin_lock(&cl->free_list_spinlock);
cl               1015 drivers/hid/intel-ish-hid/ishtp/client.c 		if (!list_empty(&cl->free_rb_list.list)) {
cl               1016 drivers/hid/intel-ish-hid/ishtp/client.c 			new_rb = list_entry(cl->free_rb_list.list.next,
cl               1019 drivers/hid/intel-ish-hid/ishtp/client.c 			spin_unlock(&cl->free_list_spinlock);
cl               1020 drivers/hid/intel-ish-hid/ishtp/client.c 			new_rb->cl = cl;
cl               1026 drivers/hid/intel-ish-hid/ishtp/client.c 			ishtp_hbm_cl_flow_control_req(dev, cl);
cl               1028 drivers/hid/intel-ish-hid/ishtp/client.c 			spin_unlock(&cl->free_list_spinlock);
cl               1032 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->recv_msg_num_frags;
cl               1049 drivers/hid/intel-ish-hid/ishtp/client.c 		cl = complete_rb->cl;
cl               1050 drivers/hid/intel-ish-hid/ishtp/client.c 		cl->ts_rx = ktime_get();
cl               1051 drivers/hid/intel-ish-hid/ishtp/client.c 		++cl->recv_msg_cnt_dma;
cl               1058 drivers/hid/intel-ish-hid/ishtp/client.c void *ishtp_get_client_data(struct ishtp_cl *cl)
cl               1060 drivers/hid/intel-ish-hid/ishtp/client.c 	return cl->client_data;
cl               1064 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
cl               1066 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->client_data = data;
cl               1070 drivers/hid/intel-ish-hid/ishtp/client.c struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
cl               1072 drivers/hid/intel-ish-hid/ishtp/client.c 	return cl->dev;
cl               1076 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
cl               1078 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->tx_ring_size = size;
cl               1082 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
cl               1084 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->rx_ring_size = size;
cl               1088 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
cl               1090 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->state = state;
cl               1094 drivers/hid/intel-ish-hid/ishtp/client.c void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
cl               1096 drivers/hid/intel-ish-hid/ishtp/client.c 	cl->fw_client_id = fw_client_id;
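
The client.c hits above cover the whole ishtp_cl lifecycle: allocate, link to a host client id, configure rings and the firmware client id, connect, then the mirrored teardown (disconnect, unlink, flush, free). A minimal open-path sketch composing those entry points, assuming a bound ishtp_cl_device and an already-resolved firmware client id (ishtp_cl_allocate() itself does not match the `cl` search, but it is the allocator whose body appears above); error handling is trimmed to the unwind order the driver itself uses:

	/* sketch only: composes the exported entry points listed above */
	static int toy_ishtp_open(struct ishtp_cl_device *cl_device,
				  int fw_client_id)
	{
		struct ishtp_cl *cl;
		int rv;

		cl = ishtp_cl_allocate(cl_device);
		if (!cl)
			return -ENOMEM;

		rv = ishtp_cl_link(cl);		/* reserves a host client id */
		if (rv)
			goto out_free;

		/* CL_DEF_RX/TX_RING_SIZE are already the init defaults;
		 * real drivers substitute their own sizes here */
		ishtp_set_rx_ring_size(cl, CL_DEF_RX_RING_SIZE);
		ishtp_set_tx_ring_size(cl, CL_DEF_TX_RING_SIZE);
		ishtp_cl_set_fw_client_id(cl, fw_client_id);
		ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

		/* HBM connect, then ring allocation and the first read */
		rv = ishtp_cl_connect(cl);
		if (rv)
			goto out_unlink;
		return 0;

	out_unlink:
		ishtp_cl_unlink(cl);
	out_free:
		ishtp_cl_free(cl);
		return rv;
	}

Teardown mirrors this in reverse: ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING), then ishtp_cl_disconnect(), ishtp_cl_unlink(), ishtp_cl_flush_queues(), ishtp_cl_free().
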
cl                114 drivers/hid/intel-ish-hid/ishtp/client.h void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
cl                117 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_cl_read_start(struct ishtp_cl *cl);
cl                120 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
cl                121 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
cl                122 drivers/hid/intel-ish-hid/ishtp/client.h void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
cl                123 drivers/hid/intel-ish-hid/ishtp/client.h void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
cl                124 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
cl                125 drivers/hid/intel-ish-hid/ishtp/client.h int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
cl                139 drivers/hid/intel-ish-hid/ishtp/client.h struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
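
The client.h prototypes above are exactly what ishtp_cl_connect() composes, and its hits show an inline unwind: a failed tx-ring allocation frees the rx ring and disconnects. The same ordering restated as a self-contained helper in the kernel's usual goto-unwind idiom (a sketch, not the driver's literal code):

	static int toy_connect_tail(struct ishtp_cl *cl)
	{
		int rets;

		rets = ishtp_cl_alloc_rx_ring(cl);
		if (rets)
			goto err_disconnect;
		rets = ishtp_cl_alloc_tx_ring(cl);
		if (rets)
			goto err_free_rx;
		/* posts the first read buffer and grants fw a flow-control credit */
		return ishtp_cl_read_start(cl);

	err_free_rx:
		ishtp_cl_free_rx_ring(cl);
	err_disconnect:
		ishtp_cl_disconnect(cl);
		return rets;
	}
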
cl                 55 drivers/hid/intel-ish-hid/ishtp/hbm.c static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
cl                 63 drivers/hid/intel-ish-hid/ishtp/hbm.c 	cmd->host_addr = cl->host_client_id;
cl                 64 drivers/hid/intel-ish-hid/ishtp/hbm.c 	cmd->fw_addr = cl->fw_client_id;
cl                 76 drivers/hid/intel-ish-hid/ishtp/hbm.c static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
cl                 80 drivers/hid/intel-ish-hid/ishtp/hbm.c 	return cl->host_client_id == cmd->host_addr &&
cl                 81 drivers/hid/intel-ish-hid/ishtp/hbm.c 		cl->fw_client_id == cmd->fw_addr;
cl                262 drivers/hid/intel-ish-hid/ishtp/hbm.c 				  struct ishtp_cl *cl)
cl                270 drivers/hid/intel-ish-hid/ishtp/hbm.c 	spin_lock_irqsave(&cl->fc_spinlock, flags);
cl                273 drivers/hid/intel-ish-hid/ishtp/hbm.c 	ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);
cl                279 drivers/hid/intel-ish-hid/ishtp/hbm.c 	if (cl->out_flow_ctrl_creds) {
cl                280 drivers/hid/intel-ish-hid/ishtp/hbm.c 		spin_unlock_irqrestore(&cl->fc_spinlock, flags);
cl                284 drivers/hid/intel-ish-hid/ishtp/hbm.c 	cl->recv_msg_num_frags = 0;
cl                288 drivers/hid/intel-ish-hid/ishtp/hbm.c 		++cl->out_flow_ctrl_creds;
cl                289 drivers/hid/intel-ish-hid/ishtp/hbm.c 		++cl->out_flow_ctrl_cnt;
cl                290 drivers/hid/intel-ish-hid/ishtp/hbm.c 		cl->ts_out_fc = ktime_get();
cl                291 drivers/hid/intel-ish-hid/ishtp/hbm.c 		if (cl->ts_rx) {
cl                292 drivers/hid/intel-ish-hid/ishtp/hbm.c 			ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);
cl                293 drivers/hid/intel-ish-hid/ishtp/hbm.c 			if (ktime_after(ts_diff, cl->ts_max_fc_delay))
cl                294 drivers/hid/intel-ish-hid/ishtp/hbm.c 				cl->ts_max_fc_delay = ts_diff;
cl                297 drivers/hid/intel-ish-hid/ishtp/hbm.c 		++cl->err_send_fc;
cl                300 drivers/hid/intel-ish-hid/ishtp/hbm.c 	spin_unlock_irqrestore(&cl->fc_spinlock, flags);
cl                313 drivers/hid/intel-ish-hid/ishtp/hbm.c int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
cl                320 drivers/hid/intel-ish-hid/ishtp/hbm.c 	ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);
cl                335 drivers/hid/intel-ish-hid/ishtp/hbm.c 	struct ishtp_cl *cl = NULL;
cl                339 drivers/hid/intel-ish-hid/ishtp/hbm.c 	list_for_each_entry(cl, &dev->cl_list, link) {
cl                340 drivers/hid/intel-ish-hid/ishtp/hbm.c 		if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl                341 drivers/hid/intel-ish-hid/ishtp/hbm.c 			cl->state = ISHTP_CL_DISCONNECTED;
cl                342 drivers/hid/intel-ish-hid/ishtp/hbm.c 			wake_up_interruptible(&cl->wait_ctrl_res);
cl                358 drivers/hid/intel-ish-hid/ishtp/hbm.c int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
cl                365 drivers/hid/intel-ish-hid/ishtp/hbm.c 	ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);
cl                380 drivers/hid/intel-ish-hid/ishtp/hbm.c 	struct ishtp_cl *cl = NULL;
cl                384 drivers/hid/intel-ish-hid/ishtp/hbm.c 	list_for_each_entry(cl, &dev->cl_list, link) {
cl                385 drivers/hid/intel-ish-hid/ishtp/hbm.c 		if (ishtp_hbm_cl_addr_equal(cl, rs)) {
cl                387 drivers/hid/intel-ish-hid/ishtp/hbm.c 				cl->state = ISHTP_CL_CONNECTED;
cl                388 drivers/hid/intel-ish-hid/ishtp/hbm.c 				cl->status = 0;
cl                390 drivers/hid/intel-ish-hid/ishtp/hbm.c 				cl->state = ISHTP_CL_DISCONNECTED;
cl                391 drivers/hid/intel-ish-hid/ishtp/hbm.c 				cl->status = -ENODEV;
cl                393 drivers/hid/intel-ish-hid/ishtp/hbm.c 			wake_up_interruptible(&cl->wait_ctrl_res);
cl                410 drivers/hid/intel-ish-hid/ishtp/hbm.c 	struct ishtp_cl *cl;
cl                417 drivers/hid/intel-ish-hid/ishtp/hbm.c 	list_for_each_entry(cl, &dev->cl_list, link) {
cl                418 drivers/hid/intel-ish-hid/ishtp/hbm.c 		if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
cl                419 drivers/hid/intel-ish-hid/ishtp/hbm.c 			cl->state = ISHTP_CL_DISCONNECTED;
cl                423 drivers/hid/intel-ish-hid/ishtp/hbm.c 			ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
cl                447 drivers/hid/intel-ish-hid/ishtp/hbm.c 	struct ishtp_cl *cl;
cl                466 drivers/hid/intel-ish-hid/ishtp/hbm.c 		list_for_each_entry(cl, &dev->cl_list, link) {
cl                467 drivers/hid/intel-ish-hid/ishtp/hbm.c 			if (cl->fw_client_id == dma_xfer->fw_client_id &&
cl                468 drivers/hid/intel-ish-hid/ishtp/hbm.c 			    cl->host_client_id == dma_xfer->host_client_id)
cl                475 drivers/hid/intel-ish-hid/ishtp/hbm.c 				if (cl->last_dma_addr >=
cl                477 drivers/hid/intel-ish-hid/ishtp/hbm.c 						cl->last_dma_addr <
cl                480 drivers/hid/intel-ish-hid/ishtp/hbm.c 					cl->last_dma_acked = 1;
cl                482 drivers/hid/intel-ish-hid/ishtp/hbm.c 					if (!list_empty(&cl->tx_list.list) &&
cl                483 drivers/hid/intel-ish-hid/ishtp/hbm.c 						cl->ishtp_flow_ctrl_creds) {
cl                487 drivers/hid/intel-ish-hid/ishtp/hbm.c 						ishtp_cl_send_msg(dev, cl);
cl                782 drivers/hid/intel-ish-hid/ishtp/hbm.c 		struct ishtp_cl *cl = NULL;
cl                786 drivers/hid/intel-ish-hid/ishtp/hbm.c 		list_for_each_entry(cl, &dev->cl_list, link) {
cl                787 drivers/hid/intel-ish-hid/ishtp/hbm.c 			if (cl->host_client_id == flow_control->host_addr &&
cl                788 drivers/hid/intel-ish-hid/ishtp/hbm.c 					cl->fw_client_id ==
cl                796 drivers/hid/intel-ish-hid/ishtp/hbm.c 				if (cl->ishtp_flow_ctrl_creds)
cl                799 drivers/hid/intel-ish-hid/ishtp/hbm.c 					 (unsigned int)cl->fw_client_id,
cl                800 drivers/hid/intel-ish-hid/ishtp/hbm.c 					 (unsigned int)cl->host_client_id,
cl                801 drivers/hid/intel-ish-hid/ishtp/hbm.c 					 cl->ishtp_flow_ctrl_creds);
cl                803 drivers/hid/intel-ish-hid/ishtp/hbm.c 					++cl->ishtp_flow_ctrl_creds;
cl                804 drivers/hid/intel-ish-hid/ishtp/hbm.c 					++cl->ishtp_flow_ctrl_cnt;
cl                805 drivers/hid/intel-ish-hid/ishtp/hbm.c 					cl->last_ipc_acked = 1;
cl                807 drivers/hid/intel-ish-hid/ishtp/hbm.c 							&cl->tx_list_spinlock,
cl                809 drivers/hid/intel-ish-hid/ishtp/hbm.c 					if (!list_empty(&cl->tx_list.list)) {
cl                815 drivers/hid/intel-ish-hid/ishtp/hbm.c 							&cl->tx_list_spinlock,
cl                817 drivers/hid/intel-ish-hid/ishtp/hbm.c 						ishtp_cl_send_msg(dev, cl);
cl                820 drivers/hid/intel-ish-hid/ishtp/hbm.c 							&cl->tx_list_spinlock,
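
Taken together, the hbm.c hits sketch ISHTP's per-client credit protocol: the host may transmit only while it holds a firmware-granted credit (cl->ishtp_flow_ctrl_creds, bumped by an incoming ISHTP_FLOW_CONTROL_CMD), and it grants the firmware one receive credit per posted read buffer (cl->out_flow_ctrl_creds, bumped by ishtp_hbm_cl_flow_control_req()). A toy model of that accounting, with illustrative names rather than kernel API:

	struct toy_creds {
		int tx;		/* like cl->ishtp_flow_ctrl_creds: fw -> host */
		int rx_out;	/* like cl->out_flow_ctrl_creds:   host -> fw */
	};

	static int toy_tx(struct toy_creds *c)
	{
		if (c->tx <= 0)
			return -EAGAIN;	/* queue; wait for FLOW_CONTROL from fw */
		c->tx--;		/* each message consumes one credit */
		return 0;
	}

	static void toy_rx_complete(struct toy_creds *c)
	{
		c->rx_out--;	/* fw used the credit we granted; re-grant once
				 * a free read buffer is queued again */
	}
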
cl                296 drivers/hid/intel-ish-hid/ishtp/hbm.h 				  struct ishtp_cl *cl);
cl                297 drivers/hid/intel-ish-hid/ishtp/hbm.h int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
cl                298 drivers/hid/intel-ish-hid/ishtp/hbm.h int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
cl                 38 drivers/hsi/clients/cmt_speech.c 	struct hsi_client	*cl;
cl                 80 drivers/hsi/clients/cmt_speech.c 	struct hsi_client		*cl;
cl                144 drivers/hsi/clients/cmt_speech.c 		dev_err(&cs_char_data.cl->device,
cl                188 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&cs_char_data.cl->device, "data notification "
cl                222 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");
cl                225 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");
cl                299 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);
cl                303 drivers/hsi/clients/cmt_speech.c 		dev_err(&cs_char_data.cl->device,
cl                370 drivers/hsi/clients/cmt_speech.c 	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
cl                433 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Control RX error detected\n");
cl                438 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
cl                464 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Control peek RX error detected\n");
cl                471 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
cl                474 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_read(hi->cl, msg);
cl                486 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
cl                492 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Control read error (%d)\n",
cl                498 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
cl                504 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_read(hi->cl, msg);
cl                520 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device,
cl                537 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device,
cl                549 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device,
cl                551 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_write(hi->cl, msg);
cl                553 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device,
cl                566 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Restarting control reads\n");
cl                611 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Data received in invalid state\n");
cl                627 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_read(hi->cl, msg);
cl                658 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
cl                671 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_read(hi->cl, rxmsg);
cl                699 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Not configured, aborting\n");
cl                704 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "HSI error, aborting\n");
cl                709 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "Write pending on data channel.\n");
cl                721 drivers/hsi/clients/cmt_speech.c 	ret = hsi_async_write(hi->cl, txmsg);
cl                772 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
cl                773 drivers/hsi/clients/cmt_speech.c 			new_state, hi->cl);
cl                784 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
cl                785 drivers/hsi/clients/cmt_speech.c 		new_state, hi->cl);
cl                822 drivers/hsi/clients/cmt_speech.c 		dev_err(&hi->cl->device, "No space for the requested buffer "
cl                840 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
cl                864 drivers/hsi/clients/cmt_speech.c 			dev_dbg(&hi->cl->device,
cl                874 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);
cl                889 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device,
cl                894 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device,
cl                901 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
cl                908 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
cl                918 drivers/hsi/clients/cmt_speech.c 		dev_dbg(&hi->cl->device,
cl                985 drivers/hsi/clients/cmt_speech.c static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
cl                991 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&cl->device, "cs_hsi_start\n");
cl                998 drivers/hsi/clients/cmt_speech.c 	hsi_if->cl = cl;
cl               1007 drivers/hsi/clients/cmt_speech.c 		dev_err(&cl->device, "Unable to alloc HSI messages\n");
cl               1012 drivers/hsi/clients/cmt_speech.c 		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
cl               1015 drivers/hsi/clients/cmt_speech.c 	err = hsi_claim_port(cl, 1);
cl               1017 drivers/hsi/clients/cmt_speech.c 		dev_err(&cl->device,
cl               1021 drivers/hsi/clients/cmt_speech.c 	hsi_if->master = ssip_slave_get_master(cl);
cl               1024 drivers/hsi/clients/cmt_speech.c 		dev_err(&cl->device, "Could not get HSI master client\n");
cl               1029 drivers/hsi/clients/cmt_speech.c 		dev_err(&cl->device,
cl               1039 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&cl->device, "cs_hsi_start...done\n");
cl               1047 drivers/hsi/clients/cmt_speech.c 	hsi_release_port(cl);
cl               1055 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");
cl               1062 drivers/hsi/clients/cmt_speech.c 	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
cl               1068 drivers/hsi/clients/cmt_speech.c 	hsi_release_port(hi->cl);
cl               1296 drivers/hsi/clients/cmt_speech.c 	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
cl               1298 drivers/hsi/clients/cmt_speech.c 		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
cl               1372 drivers/hsi/clients/cmt_speech.c 	struct hsi_client *cl = to_hsi_client(dev);
cl               1378 drivers/hsi/clients/cmt_speech.c 	cs_char_data.cl = cl;
cl               1383 drivers/hsi/clients/cmt_speech.c 	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
cl               1391 drivers/hsi/clients/cmt_speech.c 	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
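
cmt_speech.c drives everything through hsi_async_read()/hsi_async_write() on hi->cl after claiming the port. A minimal RX sketch built from those calls (buffer, channel id and completion handler are illustrative; needs <linux/hsi/hsi.h> and <linux/scatterlist.h>):

	static void toy_destructor(struct hsi_msg *msg)
	{
		hsi_free_msg(msg);		/* runs if the message is flushed */
	}

	static int toy_queue_read(struct hsi_client *cl, void *buf, size_t len,
				  void (*done)(struct hsi_msg *msg))
	{
		struct hsi_msg *msg;

		msg = hsi_alloc_msg(1, GFP_KERNEL);	/* one sg entry */
		if (!msg)
			return -ENOMEM;
		sg_init_one(msg->sgt.sgl, buf, len);
		msg->channel = 0;			/* illustrative channel id */
		msg->complete = done;
		msg->destructor = toy_destructor;	/* core expects both callbacks */
		/* non-blocking; the core also fills msg->cl, the back-pointer
		 * the completion handlers above rely on */
		return hsi_async_read(cl, msg);
	}
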
cl                 85 drivers/hsi/clients/hsi_char.c 	struct hsi_client	*cl;
cl                105 drivers/hsi/clients/hsi_char.c 	struct hsi_client	*cl;
cl                227 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
cl                247 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
cl                267 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
cl                275 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
cl                294 drivers/hsi/clients/hsi_char.c 	hsi_flush(msg->cl);
cl                295 drivers/hsi/clients/hsi_char.c 	ret = hsi_async_read(msg->cl, msg);
cl                300 drivers/hsi/clients/hsi_char.c static int hsc_break_request(struct hsi_client *cl)
cl                302 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
cl                317 drivers/hsi/clients/hsi_char.c 	ret = hsi_async_read(cl, msg);
cl                324 drivers/hsi/clients/hsi_char.c static int hsc_break_send(struct hsi_client *cl)
cl                335 drivers/hsi/clients/hsi_char.c 	ret = hsi_async_write(cl, msg);
cl                342 drivers/hsi/clients/hsi_char.c static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
cl                355 drivers/hsi/clients/hsi_char.c 	tmp = cl->rx_cfg;
cl                356 drivers/hsi/clients/hsi_char.c 	cl->rx_cfg.mode = rxc->mode;
cl                357 drivers/hsi/clients/hsi_char.c 	cl->rx_cfg.num_hw_channels = rxc->channels;
cl                358 drivers/hsi/clients/hsi_char.c 	cl->rx_cfg.flow = rxc->flow;
cl                359 drivers/hsi/clients/hsi_char.c 	ret = hsi_setup(cl);
cl                361 drivers/hsi/clients/hsi_char.c 		cl->rx_cfg = tmp;
cl                365 drivers/hsi/clients/hsi_char.c 		hsc_break_request(cl);
cl                370 drivers/hsi/clients/hsi_char.c static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
cl                372 drivers/hsi/clients/hsi_char.c 	rxc->mode = cl->rx_cfg.mode;
cl                373 drivers/hsi/clients/hsi_char.c 	rxc->channels = cl->rx_cfg.num_hw_channels;
cl                374 drivers/hsi/clients/hsi_char.c 	rxc->flow = cl->rx_cfg.flow;
cl                377 drivers/hsi/clients/hsi_char.c static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
cl                390 drivers/hsi/clients/hsi_char.c 	tmp = cl->tx_cfg;
cl                391 drivers/hsi/clients/hsi_char.c 	cl->tx_cfg.mode = txc->mode;
cl                392 drivers/hsi/clients/hsi_char.c 	cl->tx_cfg.num_hw_channels = txc->channels;
cl                393 drivers/hsi/clients/hsi_char.c 	cl->tx_cfg.speed = txc->speed;
cl                394 drivers/hsi/clients/hsi_char.c 	cl->tx_cfg.arb_mode = txc->arb_mode;
cl                395 drivers/hsi/clients/hsi_char.c 	ret = hsi_setup(cl);
cl                397 drivers/hsi/clients/hsi_char.c 		cl->tx_cfg = tmp;
cl                404 drivers/hsi/clients/hsi_char.c static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
cl                406 drivers/hsi/clients/hsi_char.c 	txc->mode = cl->tx_cfg.mode;
cl                407 drivers/hsi/clients/hsi_char.c 	txc->channels = cl->tx_cfg.num_hw_channels;
cl                408 drivers/hsi/clients/hsi_char.c 	txc->speed = cl->tx_cfg.speed;
cl                409 drivers/hsi/clients/hsi_char.c 	txc->arb_mode = cl->tx_cfg.arb_mode;
cl                425 drivers/hsi/clients/hsi_char.c 	if (channel->ch >= channel->cl->rx_cfg.num_hw_channels)
cl                437 drivers/hsi/clients/hsi_char.c 	ret = hsi_async_read(channel->cl, msg);
cl                447 drivers/hsi/clients/hsi_char.c 		hsi_flush(channel->cl);
cl                482 drivers/hsi/clients/hsi_char.c 	if (channel->ch >= channel->cl->tx_cfg.num_hw_channels)
cl                498 drivers/hsi/clients/hsi_char.c 	ret = hsi_async_write(channel->cl, msg);
cl                506 drivers/hsi/clients/hsi_char.c 		hsi_flush(channel->cl);
cl                535 drivers/hsi/clients/hsi_char.c 		hsi_flush(channel->cl);
cl                543 drivers/hsi/clients/hsi_char.c 			ret = hsi_start_tx(channel->cl);
cl                547 drivers/hsi/clients/hsi_char.c 			ret = hsi_stop_tx(channel->cl);
cl                553 drivers/hsi/clients/hsi_char.c 		return hsc_break_send(channel->cl);
cl                557 drivers/hsi/clients/hsi_char.c 		return hsc_rx_set(channel->cl, &rxc);
cl                559 drivers/hsi/clients/hsi_char.c 		hsc_rx_get(channel->cl, &rxc);
cl                566 drivers/hsi/clients/hsi_char.c 		return hsc_tx_set(channel->cl, &txc);
cl                568 drivers/hsi/clients/hsi_char.c 		hsc_tx_get(channel->cl, &txc);
cl                584 drivers/hsi/clients/hsi_char.c 		hsi_flush(cl_data->cl);
cl                585 drivers/hsi/clients/hsi_char.c 		hsi_release_port(cl_data->cl);
cl                610 drivers/hsi/clients/hsi_char.c 		ret = hsi_claim_port(cl_data->cl, 0);
cl                613 drivers/hsi/clients/hsi_char.c 		hsi_setup(cl_data->cl);
cl                641 drivers/hsi/clients/hsi_char.c 		hsi_stop_tx(channel->cl);
cl                680 drivers/hsi/clients/hsi_char.c 	struct hsi_client *cl = to_hsi_client(dev);
cl                690 drivers/hsi/clients/hsi_char.c 	hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl));
cl                706 drivers/hsi/clients/hsi_char.c 	hsi_client_set_drvdata(cl, cl_data);
cl                709 drivers/hsi/clients/hsi_char.c 	cl_data->cl = cl;
cl                713 drivers/hsi/clients/hsi_char.c 		channel->cl = cl;
cl                735 drivers/hsi/clients/hsi_char.c 	struct hsi_client *cl = to_hsi_client(dev);
cl                736 drivers/hsi/clients/hsi_char.c 	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
cl                741 drivers/hsi/clients/hsi_char.c 	hsi_client_set_drvdata(cl, NULL);
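
hsc_rx_set() and hsc_tx_set() above share a snapshot-apply-rollback idiom around hsi_setup(): stash the live config, write the requested one into the client, and restore the snapshot if the controller rejects it. A generic restatement (sketch):

	static int toy_set_rx(struct hsi_client *cl, const struct hsi_config *want)
	{
		struct hsi_config tmp = cl->rx_cfg;	/* snapshot live config */
		int ret;

		cl->rx_cfg = *want;
		ret = hsi_setup(cl);			/* program the controller */
		if (ret < 0)
			cl->rx_cfg = tmp;		/* rejected: roll back */
		return ret;
	}
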
cl                131 drivers/hsi/clients/nokia-modem.c 	struct hsi_client *cl = to_hsi_client(dev);
cl                132 drivers/hsi/clients/nokia-modem.c 	struct hsi_port *port = hsi_get_port(cl);
cl                178 drivers/hsi/clients/nokia-modem.c 	ssip.tx_cfg = cl->tx_cfg;
cl                179 drivers/hsi/clients/nokia-modem.c 	ssip.rx_cfg = cl->rx_cfg;
cl                201 drivers/hsi/clients/nokia-modem.c 	cmtspeech.tx_cfg = cl->tx_cfg;
cl                202 drivers/hsi/clients/nokia-modem.c 	cmtspeech.rx_cfg = cl->rx_cfg;
cl                 35 drivers/hsi/clients/ssi_protocol.c void ssi_waketest(struct hsi_client *cl, unsigned int enable);
cl                141 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client	*cl;
cl                219 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
cl                221 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
cl                333 drivers/hsi/clients/ssi_protocol.c 		if (slave->device.parent == ssi->cl->device.parent) {
cl                334 drivers/hsi/clients/ssi_protocol.c 			master = ssi->cl;
cl                387 drivers/hsi/clients/ssi_protocol.c static void ssip_reset(struct hsi_client *cl)
cl                389 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                395 drivers/hsi/clients/ssi_protocol.c 	hsi_flush(cl);
cl                398 drivers/hsi/clients/ssi_protocol.c 		hsi_stop_tx(cl);
cl                401 drivers/hsi/clients/ssi_protocol.c 		ssi_waketest(cl, 0); /* FIXME: To be removed */
cl                414 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Pending TX data\n");
cl                422 drivers/hsi/clients/ssi_protocol.c static void ssip_dump_state(struct hsi_client *cl)
cl                424 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                428 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
cl                429 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
cl                430 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
cl                431 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
cl                433 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Wake test %d\n",
cl                435 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
cl                436 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);
cl                439 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "pending TX data (%p)\n", msg);
cl                443 drivers/hsi/clients/ssi_protocol.c static void ssip_error(struct hsi_client *cl)
cl                445 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                448 drivers/hsi/clients/ssi_protocol.c 	ssip_dump_state(cl);
cl                449 drivers/hsi/clients/ssi_protocol.c 	ssip_reset(cl);
cl                452 drivers/hsi/clients/ssi_protocol.c 	hsi_async_read(cl, msg);
cl                458 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = ssi->cl;
cl                460 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
cl                485 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = ssi->cl;
cl                487 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Watchdog triggered\n");
cl                488 drivers/hsi/clients/ssi_protocol.c 	ssip_error(cl);
cl                494 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = ssi->cl;
cl                496 drivers/hsi/clients/ssi_protocol.c 	dev_err(&cl->device, "Watchdog triggered\n");
cl                497 drivers/hsi/clients/ssi_protocol.c 	ssip_error(cl);
cl                500 drivers/hsi/clients/ssi_protocol.c static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
cl                502 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                505 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
cl                509 drivers/hsi/clients/ssi_protocol.c 	hsi_async_write(cl, msg);
cl                510 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Issuing RX command\n");
cl                513 drivers/hsi/clients/ssi_protocol.c 	hsi_async_read(cl, msg);
cl                516 drivers/hsi/clients/ssi_protocol.c static void ssip_start_rx(struct hsi_client *cl)
cl                518 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                521 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
cl                538 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Send READY\n");
cl                539 drivers/hsi/clients/ssi_protocol.c 	hsi_async_write(cl, msg);
cl                542 drivers/hsi/clients/ssi_protocol.c static void ssip_stop_rx(struct hsi_client *cl)
cl                544 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                546 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
cl                561 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = msg->cl;
cl                562 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                570 drivers/hsi/clients/ssi_protocol.c 	hsi_async_write(cl, data);
cl                573 drivers/hsi/clients/ssi_protocol.c static int ssip_xmit(struct hsi_client *cl)
cl                575 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                602 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
cl                605 drivers/hsi/clients/ssi_protocol.c 	return hsi_async_write(cl, msg);
cl                642 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = msg->cl;
cl                643 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                647 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "RX data error\n");
cl                649 drivers/hsi/clients/ssi_protocol.c 		ssip_error(cl);
cl                658 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
cl                660 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                669 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "Boot info req on active state\n");
cl                670 drivers/hsi/clients/ssi_protocol.c 		ssip_error(cl);
cl                679 drivers/hsi/clients/ssi_protocol.c 			ssi_waketest(cl, 1); /* FIXME: To be removed */
cl                685 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
cl                687 drivers/hsi/clients/ssi_protocol.c 			dev_warn(&cl->device, "boot info req verid mismatch\n");
cl                691 drivers/hsi/clients/ssi_protocol.c 		hsi_async_write(cl, msg);
cl                694 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
cl                699 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
cl                701 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                704 drivers/hsi/clients/ssi_protocol.c 		dev_warn(&cl->device, "boot info resp verid mismatch\n");
cl                711 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
cl                716 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
cl                718 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                723 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
cl                731 drivers/hsi/clients/ssi_protocol.c 		ssi_waketest(cl, 0); /* FIXME: To be removed */
cl                738 drivers/hsi/clients/ssi_protocol.c 	dev_notice(&cl->device, "WAKELINES TEST %s\n",
cl                741 drivers/hsi/clients/ssi_protocol.c 		ssip_error(cl);
cl                744 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "CMT is ONLINE\n");
cl                749 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_ready(struct hsi_client *cl)
cl                751 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                755 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
cl                761 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Ignore spurious READY command\n");
cl                767 drivers/hsi/clients/ssi_protocol.c 	ssip_xmit(cl);
cl                770 drivers/hsi/clients/ssi_protocol.c static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
cl                772 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                777 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
cl                780 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
cl                787 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "START TRANS id %d expected %d\n",
cl                796 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "No memory for rx skb\n");
cl                803 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "No memory for RX data msg\n");
cl                807 drivers/hsi/clients/ssi_protocol.c 	hsi_async_read(cl, msg);
cl                813 drivers/hsi/clients/ssi_protocol.c 	ssip_error(cl);
cl                818 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = msg->cl;
cl                823 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "RX error detected\n");
cl                825 drivers/hsi/clients/ssi_protocol.c 		ssip_error(cl);
cl                828 drivers/hsi/clients/ssi_protocol.c 	hsi_async_read(cl, msg);
cl                829 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
cl                835 drivers/hsi/clients/ssi_protocol.c 		ssip_rx_bootinforeq(cl, cmd);
cl                838 drivers/hsi/clients/ssi_protocol.c 		ssip_rx_bootinforesp(cl, cmd);
cl                841 drivers/hsi/clients/ssi_protocol.c 		ssip_rx_waketest(cl, cmd);
cl                844 drivers/hsi/clients/ssi_protocol.c 		ssip_rx_strans(cl, cmd);
cl                847 drivers/hsi/clients/ssi_protocol.c 		ssip_rx_ready(cl);
cl                850 drivers/hsi/clients/ssi_protocol.c 		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
cl                857 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = msg->cl;
cl                858 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                867 drivers/hsi/clients/ssi_protocol.c 			hsi_stop_tx(cl);
cl                872 drivers/hsi/clients/ssi_protocol.c 		ssip_xmit(cl);
cl                879 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = msg->cl;
cl                880 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                884 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "TX data error\n");
cl                885 drivers/hsi/clients/ssi_protocol.c 		ssip_error(cl);
cl                895 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Send SWBREAK\n");
cl                896 drivers/hsi/clients/ssi_protocol.c 		hsi_async_write(cl, cmsg);
cl                899 drivers/hsi/clients/ssi_protocol.c 		ssip_xmit(cl);
cl                905 drivers/hsi/clients/ssi_protocol.c static void ssip_port_event(struct hsi_client *cl, unsigned long event)
cl                909 drivers/hsi/clients/ssi_protocol.c 		ssip_start_rx(cl);
cl                912 drivers/hsi/clients/ssi_protocol.c 		ssip_stop_rx(cl);
cl                921 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
cl                922 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                925 drivers/hsi/clients/ssi_protocol.c 	err = hsi_claim_port(cl, 1);
cl                927 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "SSI port already claimed\n");
cl                930 drivers/hsi/clients/ssi_protocol.c 	err = hsi_register_port_event(cl, ssip_port_event);
cl                932 drivers/hsi/clients/ssi_protocol.c 		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
cl                936 drivers/hsi/clients/ssi_protocol.c 	dev_dbg(&cl->device, "Configuring SSI port\n");
cl                937 drivers/hsi/clients/ssi_protocol.c 	hsi_setup(cl);
cl                940 drivers/hsi/clients/ssi_protocol.c 		ssi_waketest(cl, 1); /* FIXME: To be removed */
cl                946 drivers/hsi/clients/ssi_protocol.c 	ssip_send_bootinfo_req_cmd(cl);
cl                953 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
cl                955 drivers/hsi/clients/ssi_protocol.c 	ssip_reset(cl);
cl                956 drivers/hsi/clients/ssi_protocol.c 	hsi_unregister_port_event(cl);
cl                957 drivers/hsi/clients/ssi_protocol.c 	hsi_release_port(cl);
cl                966 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = ssi->cl;
cl                968 drivers/hsi/clients/ssi_protocol.c 	ssip_xmit(cl);
cl                973 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
cl                974 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl                996 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
cl               1004 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
cl               1010 drivers/hsi/clients/ssi_protocol.c 		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
cl               1016 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
cl               1017 drivers/hsi/clients/ssi_protocol.c 		hsi_start_tx(cl);
cl               1020 drivers/hsi/clients/ssi_protocol.c 		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
cl               1045 drivers/hsi/clients/ssi_protocol.c 	dev_err(&ssi->cl->device, "CMT reset detected!\n");
cl               1046 drivers/hsi/clients/ssi_protocol.c 	ssip_error(ssi->cl);
cl               1075 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = to_hsi_client(dev);
cl               1090 drivers/hsi/clients/ssi_protocol.c 	hsi_client_set_drvdata(cl, ssi);
cl               1091 drivers/hsi/clients/ssi_protocol.c 	ssi->cl = cl;
cl               1094 drivers/hsi/clients/ssi_protocol.c 	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
cl               1101 drivers/hsi/clients/ssi_protocol.c 	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
cl               1151 drivers/hsi/clients/ssi_protocol.c 	struct hsi_client *cl = to_hsi_client(dev);
cl               1152 drivers/hsi/clients/ssi_protocol.c 	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
cl               1157 drivers/hsi/clients/ssi_protocol.c 	hsi_client_set_drvdata(cl, NULL);
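
The ssi_protocol hits above trace the usual HSI client lifecycle: claim the port (line 925), register a port-event handler (line 930), push the client's configuration with hsi_setup() (line 937), and undo everything in reverse on remove (lines 955-957). A minimal sketch of that pattern using only the calls shown in the listing; my_port_event()/my_open()/my_close() are hypothetical names and error handling is trimmed:

#include <linux/hsi/hsi.h>

/* Hypothetical handler matching the signature of ssip_port_event() above. */
static void my_port_event(struct hsi_client *cl, unsigned long event)
{
        switch (event) {
        case HSI_EVENT_START_RX:        /* peer raised CAWAKE */
        case HSI_EVENT_STOP_RX:         /* peer dropped CAWAKE */
                break;
        }
}

static int my_open(struct hsi_client *cl)
{
        int err;

        err = hsi_claim_port(cl, 1);    /* 1 = shared claim, as at line 925 */
        if (err < 0)
                return err;

        err = hsi_register_port_event(cl, my_port_event);
        if (err < 0) {
                hsi_release_port(cl);
                return err;
        }

        return hsi_setup(cl);           /* apply tx_cfg/rx_cfg to the port */
}

static void my_close(struct hsi_client *cl)
{
        hsi_unregister_port_event(cl);
        hsi_release_port(cl);
}
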
cl                147 drivers/hsi/controllers/omap_ssi_core.c void ssi_waketest(struct hsi_client *cl, unsigned int enable)
cl                149 drivers/hsi/controllers/omap_ssi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                171 drivers/hsi/controllers/omap_ssi_core.c 	struct hsi_port *port = to_hsi_port(msg->cl->device.parent);
cl                 28 drivers/hsi/controllers/omap_ssi_port.c static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
cl                192 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(msg->cl);
cl                209 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(msg->cl);
cl                288 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(msg->cl);
cl                340 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(msg->cl);
cl                379 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(msg->cl);
cl                434 drivers/hsi/controllers/omap_ssi_port.c static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
cl                441 drivers/hsi/controllers/omap_ssi_port.c 		if ((cl) && (cl != msg->cl))
cl                454 drivers/hsi/controllers/omap_ssi_port.c static int ssi_setup(struct hsi_client *cl)
cl                456 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = to_hsi_port(cl->device.parent);
cl                468 drivers/hsi/controllers/omap_ssi_port.c 	if (cl->tx_cfg.speed)
cl                469 drivers/hsi/controllers/omap_ssi_port.c 		omap_ssi->max_speed = cl->tx_cfg.speed;
cl                472 drivers/hsi/controllers/omap_ssi_port.c 		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
cl                473 drivers/hsi/controllers/omap_ssi_port.c 						cl->tx_cfg.speed, div);
cl                485 drivers/hsi/controllers/omap_ssi_port.c 	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
cl                486 drivers/hsi/controllers/omap_ssi_port.c 	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
cl                487 drivers/hsi/controllers/omap_ssi_port.c 	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
cl                490 drivers/hsi/controllers/omap_ssi_port.c 	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
cl                494 drivers/hsi/controllers/omap_ssi_port.c 		(cl->rx_cfg.mode != SSI_MODE_FRAME))
cl                495 drivers/hsi/controllers/omap_ssi_port.c 		ssi_flush_queue(&omap_port->brkqueue, cl);
cl                496 drivers/hsi/controllers/omap_ssi_port.c 	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
cl                497 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
cl                498 drivers/hsi/controllers/omap_ssi_port.c 				  cl->tx_cfg.num_hw_channels);
cl                503 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
cl                504 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
cl                505 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->sst.mode = cl->tx_cfg.mode;
cl                509 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
cl                510 drivers/hsi/controllers/omap_ssi_port.c 	omap_port->ssr.mode = cl->rx_cfg.mode;
cl                519 drivers/hsi/controllers/omap_ssi_port.c static int ssi_flush(struct hsi_client *cl)
cl                521 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                541 drivers/hsi/controllers/omap_ssi_port.c 		if (!msg || (port != hsi_get_port(msg->cl)))
cl                597 drivers/hsi/controllers/omap_ssi_port.c static int ssi_start_tx(struct hsi_client *cl)
cl                599 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                616 drivers/hsi/controllers/omap_ssi_port.c static int ssi_stop_tx(struct hsi_client *cl)
cl                618 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                666 drivers/hsi/controllers/omap_ssi_port.c static void ssi_cleanup_queues(struct hsi_client *cl)
cl                668 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                679 drivers/hsi/controllers/omap_ssi_port.c 	ssi_flush_queue(&omap_port->brkqueue, cl);
cl                688 drivers/hsi/controllers/omap_ssi_port.c 		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
cl                695 drivers/hsi/controllers/omap_ssi_port.c 		ssi_flush_queue(&omap_port->txqueue[i], cl);
cl                702 drivers/hsi/controllers/omap_ssi_port.c 		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
cl                706 drivers/hsi/controllers/omap_ssi_port.c 		ssi_flush_queue(&omap_port->rxqueue[i], cl);
cl                727 drivers/hsi/controllers/omap_ssi_port.c static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
cl                730 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                739 drivers/hsi/controllers/omap_ssi_port.c 		if ((!msg) || (msg->cl != cl))
cl                769 drivers/hsi/controllers/omap_ssi_port.c static int ssi_release(struct hsi_client *cl)
cl                771 drivers/hsi/controllers/omap_ssi_port.c 	struct hsi_port *port = hsi_get_port(cl);
cl                778 drivers/hsi/controllers/omap_ssi_port.c 	ssi_cleanup_gdd(ssi, cl);
cl                780 drivers/hsi/controllers/omap_ssi_port.c 	ssi_cleanup_queues(cl);
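
ssi_flush_queue() (line 434) and ssi_cleanup_queues() walk the port's queues and act only on messages whose msg->cl matches the client being torn down, a NULL client meaning "flush everything" (line 441). A sketch of that filter, assuming the message should be flagged as errored before its destructor runs:

#include <linux/hsi/hsi.h>
#include <linux/list.h>

static void flush_queue_for_client(struct list_head *queue,
                                   struct hsi_client *cl)
{
        struct hsi_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, queue, link) {
                if (cl && cl != msg->cl)
                        continue;               /* another client's message */
                list_del(&msg->link);
                msg->status = HSI_STATUS_ERROR; /* assumption: flag as aborted */
                if (msg->destructor)
                        msg->destructor(msg);
                else
                        hsi_free_msg(msg);
        }
}
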
cl                 60 drivers/hsi/hsi_core.c 	struct hsi_client *cl = to_hsi_client(dev);
cl                 62 drivers/hsi/hsi_core.c 	kfree(cl->tx_cfg.channels);
cl                 63 drivers/hsi/hsi_core.c 	kfree(cl->rx_cfg.channels);
cl                 64 drivers/hsi/hsi_core.c 	kfree(cl);
cl                 70 drivers/hsi/hsi_core.c 	struct hsi_client *cl;
cl                 73 drivers/hsi/hsi_core.c 	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
cl                 74 drivers/hsi/hsi_core.c 	if (!cl)
cl                 77 drivers/hsi/hsi_core.c 	cl->tx_cfg = info->tx_cfg;
cl                 78 drivers/hsi/hsi_core.c 	if (cl->tx_cfg.channels) {
cl                 79 drivers/hsi/hsi_core.c 		size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);
cl                 80 drivers/hsi/hsi_core.c 		cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size,
cl                 82 drivers/hsi/hsi_core.c 		if (!cl->tx_cfg.channels)
cl                 86 drivers/hsi/hsi_core.c 	cl->rx_cfg = info->rx_cfg;
cl                 87 drivers/hsi/hsi_core.c 	if (cl->rx_cfg.channels) {
cl                 88 drivers/hsi/hsi_core.c 		size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels);
cl                 89 drivers/hsi/hsi_core.c 		cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size,
cl                 91 drivers/hsi/hsi_core.c 		if (!cl->rx_cfg.channels)
cl                 95 drivers/hsi/hsi_core.c 	cl->device.bus = &hsi_bus_type;
cl                 96 drivers/hsi/hsi_core.c 	cl->device.parent = &port->device;
cl                 97 drivers/hsi/hsi_core.c 	cl->device.release = hsi_client_release;
cl                 98 drivers/hsi/hsi_core.c 	dev_set_name(&cl->device, "%s", info->name);
cl                 99 drivers/hsi/hsi_core.c 	cl->device.platform_data = info->platform_data;
cl                101 drivers/hsi/hsi_core.c 		cl->device.archdata = *info->archdata;
cl                102 drivers/hsi/hsi_core.c 	if (device_register(&cl->device) < 0) {
cl                104 drivers/hsi/hsi_core.c 		put_device(&cl->device);
cl                107 drivers/hsi/hsi_core.c 	return cl;
cl                109 drivers/hsi/hsi_core.c 	kfree(cl->tx_cfg.channels);
cl                111 drivers/hsi/hsi_core.c 	kfree(cl);
cl                199 drivers/hsi/hsi_core.c 	struct hsi_client *cl;
cl                205 drivers/hsi/hsi_core.c 	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
cl                206 drivers/hsi/hsi_core.c 	if (!cl)
cl                213 drivers/hsi/hsi_core.c 	dev_set_name(&cl->device, "%s", name);
cl                218 drivers/hsi/hsi_core.c 						 &cl->rx_cfg.mode);
cl                223 drivers/hsi/hsi_core.c 						 &cl->tx_cfg.mode);
cl                227 drivers/hsi/hsi_core.c 		cl->rx_cfg.mode = mode;
cl                228 drivers/hsi/hsi_core.c 		cl->tx_cfg.mode = mode;
cl                232 drivers/hsi/hsi_core.c 				   &cl->tx_cfg.speed);
cl                235 drivers/hsi/hsi_core.c 	cl->rx_cfg.speed = cl->tx_cfg.speed;
cl                238 drivers/hsi/hsi_core.c 					 &cl->rx_cfg.flow);
cl                243 drivers/hsi/hsi_core.c 					     &cl->rx_cfg.arb_mode);
cl                255 drivers/hsi/hsi_core.c 	cl->rx_cfg.num_channels = cells;
cl                256 drivers/hsi/hsi_core.c 	cl->tx_cfg.num_channels = cells;
cl                257 drivers/hsi/hsi_core.c 	cl->rx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
cl                258 drivers/hsi/hsi_core.c 	if (!cl->rx_cfg.channels) {
cl                263 drivers/hsi/hsi_core.c 	cl->tx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
cl                264 drivers/hsi/hsi_core.c 	if (!cl->tx_cfg.channels) {
cl                284 drivers/hsi/hsi_core.c 		cl->rx_cfg.channels[i] = channel;
cl                285 drivers/hsi/hsi_core.c 		cl->tx_cfg.channels[i] = channel;
cl                288 drivers/hsi/hsi_core.c 	cl->rx_cfg.num_hw_channels = max_chan + 1;
cl                289 drivers/hsi/hsi_core.c 	cl->tx_cfg.num_hw_channels = max_chan + 1;
cl                291 drivers/hsi/hsi_core.c 	cl->device.bus = &hsi_bus_type;
cl                292 drivers/hsi/hsi_core.c 	cl->device.parent = &port->device;
cl                293 drivers/hsi/hsi_core.c 	cl->device.release = hsi_client_release;
cl                294 drivers/hsi/hsi_core.c 	cl->device.of_node = client;
cl                296 drivers/hsi/hsi_core.c 	if (device_register(&cl->device) < 0) {
cl                298 drivers/hsi/hsi_core.c 		put_device(&cl->device);
cl                304 drivers/hsi/hsi_core.c 	kfree(cl->tx_cfg.channels);
cl                306 drivers/hsi/hsi_core.c 	kfree(cl->rx_cfg.channels);
cl                308 drivers/hsi/hsi_core.c 	kfree(cl);
cl                427 drivers/hsi/hsi_core.c static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
cl                577 drivers/hsi/hsi_core.c int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
cl                579 drivers/hsi/hsi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                581 drivers/hsi/hsi_core.c 	if (!hsi_port_claimed(cl))
cl                585 drivers/hsi/hsi_core.c 	msg->cl = cl;
cl                598 drivers/hsi/hsi_core.c int hsi_claim_port(struct hsi_client *cl, unsigned int share)
cl                600 drivers/hsi/hsi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                614 drivers/hsi/hsi_core.c 	cl->pclaimed = 1;
cl                626 drivers/hsi/hsi_core.c void hsi_release_port(struct hsi_client *cl)
cl                628 drivers/hsi/hsi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                632 drivers/hsi/hsi_core.c 	port->release(cl);
cl                633 drivers/hsi/hsi_core.c 	if (cl->pclaimed)
cl                636 drivers/hsi/hsi_core.c 	cl->pclaimed = 0;
cl                647 drivers/hsi/hsi_core.c 	struct hsi_client *cl = container_of(nb, struct hsi_client, nb);
cl                649 drivers/hsi/hsi_core.c 	(*cl->ehandler)(cl, event);
cl                666 drivers/hsi/hsi_core.c int hsi_register_port_event(struct hsi_client *cl,
cl                669 drivers/hsi/hsi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                671 drivers/hsi/hsi_core.c 	if (!handler || cl->ehandler)
cl                673 drivers/hsi/hsi_core.c 	if (!hsi_port_claimed(cl))
cl                675 drivers/hsi/hsi_core.c 	cl->ehandler = handler;
cl                676 drivers/hsi/hsi_core.c 	cl->nb.notifier_call = hsi_event_notifier_call;
cl                678 drivers/hsi/hsi_core.c 	return blocking_notifier_chain_register(&port->n_head, &cl->nb);
cl                691 drivers/hsi/hsi_core.c int hsi_unregister_port_event(struct hsi_client *cl)
cl                693 drivers/hsi/hsi_core.c 	struct hsi_port *port = hsi_get_port(cl);
cl                696 drivers/hsi/hsi_core.c 	WARN_ON(!hsi_port_claimed(cl));
cl                698 drivers/hsi/hsi_core.c 	err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb);
cl                700 drivers/hsi/hsi_core.c 		cl->ehandler = NULL;
cl                738 drivers/hsi/hsi_core.c int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name)
cl                742 drivers/hsi/hsi_core.c 	if (!cl->rx_cfg.channels)
cl                745 drivers/hsi/hsi_core.c 	for (i = 0; i < cl->rx_cfg.num_channels; i++)
cl                746 drivers/hsi/hsi_core.c 		if (!strcmp(cl->rx_cfg.channels[i].name, name))
cl                747 drivers/hsi/hsi_core.c 			return cl->rx_cfg.channels[i].id;
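
hsi_async() (line 577) is the single submission entry point: it checks the claim, stamps msg->cl, and hands the message to the port. A typical caller resolves its channel with hsi_get_channel_id_by_name() (line 738), allocates a one-entry message, and submits through the hsi_async_write() wrapper, which sets the transfer type and calls hsi_async(). A sketch; my_complete() and the flat buffer are placeholders, and "mcsaab-control" is the channel name from the ssi_protocol hits above:

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>

static void my_complete(struct hsi_msg *msg)
{
        /* runs from the controller when the transfer finishes */
        hsi_free_msg(msg);
}

static int send_on_named_channel(struct hsi_client *cl, void *buf, size_t len)
{
        struct hsi_msg *msg;
        int ch;

        ch = hsi_get_channel_id_by_name(cl, "mcsaab-control");
        if (ch < 0)
                return ch;

        msg = hsi_alloc_msg(1, GFP_KERNEL);     /* one scatterlist entry */
        if (!msg)
                return -ENOMEM;

        sg_init_one(msg->sgt.sgl, buf, len);
        msg->channel = ch;
        msg->complete = my_complete;
        msg->destructor = my_complete;  /* assumption: same cleanup on flush */

        return hsi_async_write(cl, msg);
}
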
cl                847 drivers/hwmon/asb100.c 	struct i2c_client *cl;
cl                861 drivers/hwmon/asb100.c 		cl = data->lm75[bank - 1];
cl                866 drivers/hwmon/asb100.c 			res = i2c_smbus_read_word_swapped(cl, 0);
cl                869 drivers/hwmon/asb100.c 			res = i2c_smbus_read_byte_data(cl, 1);
cl                872 drivers/hwmon/asb100.c 			res = i2c_smbus_read_word_swapped(cl, 2);
cl                876 drivers/hwmon/asb100.c 			res = i2c_smbus_read_word_swapped(cl, 3);
cl                892 drivers/hwmon/asb100.c 	struct i2c_client *cl;
cl                906 drivers/hwmon/asb100.c 		cl = data->lm75[bank - 1];
cl                911 drivers/hwmon/asb100.c 			i2c_smbus_write_byte_data(cl, 1, value & 0xff);
cl                914 drivers/hwmon/asb100.c 			i2c_smbus_write_word_swapped(cl, 2, value);
cl                917 drivers/hwmon/asb100.c 			i2c_smbus_write_word_swapped(cl, 3, value);
cl               1261 drivers/hwmon/w83781d.c 	struct i2c_client *cl;
cl               1272 drivers/hwmon/w83781d.c 		cl = data->lm75[bank - 1];
cl               1276 drivers/hwmon/w83781d.c 			res = i2c_smbus_read_word_swapped(cl, 0);
cl               1279 drivers/hwmon/w83781d.c 			res = i2c_smbus_read_byte_data(cl, 1);
cl               1282 drivers/hwmon/w83781d.c 			res = i2c_smbus_read_word_swapped(cl, 2);
cl               1286 drivers/hwmon/w83781d.c 			res = i2c_smbus_read_word_swapped(cl, 3);
cl               1301 drivers/hwmon/w83781d.c 	struct i2c_client *cl;
cl               1313 drivers/hwmon/w83781d.c 		cl = data->lm75[bank - 1];
cl               1317 drivers/hwmon/w83781d.c 			i2c_smbus_write_byte_data(cl, 1, value & 0xff);
cl               1320 drivers/hwmon/w83781d.c 			i2c_smbus_write_word_swapped(cl, 2, value);
cl               1323 drivers/hwmon/w83781d.c 			i2c_smbus_write_word_swapped(cl, 3, value);
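
asb100 and w83781d share the same scheme for banks 1 and 2: those banks are satellite LM75 chips reached through the cached client in data->lm75[bank - 1], and the register decides between a byte and a swapped-word SMBus transfer. The read dispatch both drivers implement, reduced to the LM75 register map (0 temperature, 1 configuration, 2 hysteresis, 3 over-temperature):

#include <linux/i2c.h>

static s32 lm75_bank_read(struct i2c_client *cl, u8 reg)
{
        switch (reg) {
        case 0:         /* temperature: 16-bit, big-endian on the wire */
        case 2:         /* hysteresis limit */
        case 3:         /* over-temperature limit */
                return i2c_smbus_read_word_swapped(cl, reg);
        case 1:         /* configuration: single byte */
                return i2c_smbus_read_byte_data(cl, reg);
        default:
                return -EINVAL;
        }
}
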
cl                 79 drivers/hwmon/xgene-hwmon.c #define to_xgene_hwmon_dev(cl)		\
cl                 80 drivers/hwmon/xgene-hwmon.c 	container_of(cl, struct xgene_hwmon_dev, mbox_client)
cl                478 drivers/hwmon/xgene-hwmon.c static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
cl                480 drivers/hwmon/xgene-hwmon.c 	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
cl                528 drivers/hwmon/xgene-hwmon.c static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
cl                530 drivers/hwmon/xgene-hwmon.c 	struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
cl                594 drivers/hwmon/xgene-hwmon.c static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
cl                597 drivers/hwmon/xgene-hwmon.c 		dev_dbg(cl->dev, "TX did not complete: CMD sent:%x, ret:%d\n",
cl                600 drivers/hwmon/xgene-hwmon.c 		dev_dbg(cl->dev, "TX completed. CMD sent:%x, ret:%d\n",
cl                617 drivers/hwmon/xgene-hwmon.c 	struct mbox_client *cl;
cl                626 drivers/hwmon/xgene-hwmon.c 	cl = &ctx->mbox_client;
cl                640 drivers/hwmon/xgene-hwmon.c 	cl->dev = &pdev->dev;
cl                641 drivers/hwmon/xgene-hwmon.c 	cl->tx_done = xgene_hwmon_tx_done;
cl                642 drivers/hwmon/xgene-hwmon.c 	cl->tx_block = false;
cl                643 drivers/hwmon/xgene-hwmon.c 	cl->tx_tout = MBOX_OP_TIMEOUTMS;
cl                644 drivers/hwmon/xgene-hwmon.c 	cl->knows_txdone = false;
cl                646 drivers/hwmon/xgene-hwmon.c 		cl->rx_callback = xgene_hwmon_rx_cb;
cl                647 drivers/hwmon/xgene-hwmon.c 		ctx->mbox_chan = mbox_request_channel(cl, 0);
cl                673 drivers/hwmon/xgene-hwmon.c 		cl->rx_callback = xgene_hwmon_pcc_rx_cb;
cl                674 drivers/hwmon/xgene-hwmon.c 		ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
cl                116 drivers/i2c/busses/i2c-xgene-slimpro.c #define to_slimpro_i2c_dev(cl)	\
cl                117 drivers/i2c/busses/i2c-xgene-slimpro.c 		container_of(cl, struct slimpro_i2c_dev, mbox_client)
cl                139 drivers/i2c/busses/i2c-xgene-slimpro.c static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg)
cl                141 drivers/i2c/busses/i2c-xgene-slimpro.c 	struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
cl                156 drivers/i2c/busses/i2c-xgene-slimpro.c static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
cl                158 drivers/i2c/busses/i2c-xgene-slimpro.c 	struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
cl                445 drivers/i2c/busses/i2c-xgene-slimpro.c 	struct mbox_client *cl;
cl                454 drivers/i2c/busses/i2c-xgene-slimpro.c 	cl = &ctx->mbox_client;
cl                457 drivers/i2c/busses/i2c-xgene-slimpro.c 	cl->dev = &pdev->dev;
cl                459 drivers/i2c/busses/i2c-xgene-slimpro.c 	cl->tx_tout = MAILBOX_OP_TIMEOUT;
cl                460 drivers/i2c/busses/i2c-xgene-slimpro.c 	cl->knows_txdone = false;
cl                462 drivers/i2c/busses/i2c-xgene-slimpro.c 		cl->tx_block = true;
cl                463 drivers/i2c/busses/i2c-xgene-slimpro.c 		cl->rx_callback = slimpro_i2c_rx_cb;
cl                464 drivers/i2c/busses/i2c-xgene-slimpro.c 		ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
cl                485 drivers/i2c/busses/i2c-xgene-slimpro.c 		cl->tx_block = false;
cl                486 drivers/i2c/busses/i2c-xgene-slimpro.c 		cl->rx_callback = slimpro_i2c_pcc_rx_cb;
cl                487 drivers/i2c/busses/i2c-xgene-slimpro.c 		ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
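
xgene-hwmon and i2c-xgene-slimpro populate the same struct mbox_client fields before requesting a channel; they differ only in tx_block and in which rx_callback is wired up (native mailbox vs. PCC). A compressed sketch of that setup; my_rx() and the timeout value are placeholders, and the returned channel must still be IS_ERR()-checked:

#include <linux/mailbox_client.h>

static void my_rx(struct mbox_client *cl, void *msg)
{
        /* called from the controller's receive path with the message */
}

static struct mbox_chan *my_request_chan(struct device *dev,
                                         struct mbox_client *cl)
{
        cl->dev = dev;
        cl->rx_callback = my_rx;
        cl->tx_block = true;            /* mbox_send_message() waits for TX done */
        cl->tx_tout = 100;              /* ms; assumption, pick per protocol */
        cl->knows_txdone = false;       /* framework tracks TX completion */

        return mbox_request_channel(cl, 0);     /* index into "mboxes" phandles */
}
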
cl                 36 drivers/input/tablet/hanwang.c #define HANWANG_TABLET_DEVICE(vend, cl, sc, pr) \
cl                 40 drivers/input/tablet/hanwang.c 	.bInterfaceClass = (cl), \
cl                256 drivers/leds/leds-lp5521.c 	dev_err(&chip->cl->dev, "wrong pattern format\n");
cl                265 drivers/leds/leds-lp5521.c 		dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
cl                293 drivers/leds/leds-lp5521.c 		dev_err(&chip->cl->dev, "error in resetting chip\n");
cl                297 drivers/leds/leds-lp5521.c 		dev_err(&chip->cl->dev,
cl                528 drivers/leds/leds-lp5521.c 	chip->cl = client;
cl                314 drivers/leds/leds-lp5523.c 		dev_err(&chip->cl->dev,
cl                364 drivers/leds/leds-lp5523.c 	dev_err(&chip->cl->dev, "wrong pattern format\n");
cl                373 drivers/leds/leds-lp5523.c 		dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
cl                895 drivers/leds/leds-lp5523.c 	chip->cl = client;
cl                255 drivers/leds/leds-lp5562.c 	dev_err(&chip->cl->dev, "wrong pattern format\n");
cl                268 drivers/leds/leds-lp5562.c 		dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
cl                368 drivers/leds/leds-lp5562.c 		dev_err(&chip->cl->dev, "invalid pattern data\n");
cl                543 drivers/leds/leds-lp5562.c 	chip->cl = client;
cl                149 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                180 drivers/leds/leds-lp55xx-common.c 			pdata->label ? : chip->cl->name, chan);
cl                196 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                221 drivers/leds/leds-lp55xx-common.c 	const char *name = chip->cl->name;
cl                222 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                321 drivers/leds/leds-lp55xx-common.c 	return i2c_smbus_write_byte_data(chip->cl, reg, val);
cl                329 drivers/leds/leds-lp55xx-common.c 	ret = i2c_smbus_read_byte_data(chip->cl, reg);
cl                359 drivers/leds/leds-lp55xx-common.c 	clk = devm_clk_get(&chip->cl->dev, "32k_clk");
cl                372 drivers/leds/leds-lp55xx-common.c 	dev_info(&chip->cl->dev, "%dHz external clock used\n",	LP55XX_CLK_32K);
cl                378 drivers/leds/leds-lp55xx-common.c 	dev_info(&chip->cl->dev, "internal clock used\n");
cl                387 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                466 drivers/leds/leds-lp55xx-common.c 		dev_err(&chip->cl->dev, "empty brightness configuration\n");
cl                512 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                531 drivers/leds/leds-lp55xx-common.c 	struct device *dev = &chip->cl->dev;
cl                147 drivers/leds/leds-lp55xx-common.h 	struct i2c_client *cl;
cl                247 drivers/leds/leds-lp8501.c 	dev_err(&chip->cl->dev, "wrong pattern format\n");
cl                256 drivers/leds/leds-lp8501.c 		dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
cl                331 drivers/leds/leds-lp8501.c 	chip->cl = client;
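
Every lp55xx family driver (lp5521/5523/5562/8501) caches the i2c_client as chip->cl at probe and funnels register access through the byte-wide SMBus helpers at leds-lp55xx-common.c lines 321 and 329. A sketch of a read-modify-write built on the same calls; the helper name is hypothetical:

#include <linux/i2c.h>

static int lp55xx_style_update_bits(struct i2c_client *cl, u8 reg,
                                    u8 mask, u8 val)
{
        s32 tmp = i2c_smbus_read_byte_data(cl, reg);

        if (tmp < 0)
                return tmp;

        tmp &= ~mask;
        tmp |= val & mask;

        return i2c_smbus_write_byte_data(cl, reg, tmp);
}
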
cl                360 drivers/leds/leds-tca6507.c 	struct i2c_client *cl = tca->client;
cl                373 drivers/leds/leds-tca6507.c 			i2c_smbus_write_byte_data(cl, r, file[r]);
cl                120 drivers/macintosh/therm_windtunnel.c write_reg( struct i2c_client *cl, int reg, int data, int len )
cl                132 drivers/macintosh/therm_windtunnel.c 	if( i2c_master_send(cl, tmp, len) != len )
cl                138 drivers/macintosh/therm_windtunnel.c read_reg( struct i2c_client *cl, int reg, int len )
cl                145 drivers/macintosh/therm_windtunnel.c 	if( i2c_master_send(cl, buf, 1) != 1 )
cl                147 drivers/macintosh/therm_windtunnel.c 	if( i2c_master_recv(cl, buf, len) != len )
cl                357 drivers/macintosh/therm_windtunnel.c attach_fan( struct i2c_client *cl )
cl                363 drivers/macintosh/therm_windtunnel.c 	if( read_reg(cl, 0x3d, 1) != 0x30 || read_reg(cl, 0x3e, 1) != 0x41 )
cl                365 drivers/macintosh/therm_windtunnel.c 	printk("ADM1030 fan controller [@%02x]\n", cl->addr );
cl                367 drivers/macintosh/therm_windtunnel.c 	x.fan = cl;
cl                373 drivers/macintosh/therm_windtunnel.c attach_thermostat( struct i2c_client *cl ) 
cl                380 drivers/macintosh/therm_windtunnel.c 	if( (temp=read_reg(cl, 0, 2)) < 0 )
cl                386 drivers/macintosh/therm_windtunnel.c 	hyst_temp = read_reg(cl, 2, 2);
cl                387 drivers/macintosh/therm_windtunnel.c 	os_temp = read_reg(cl, 3, 2);
cl                391 drivers/macintosh/therm_windtunnel.c 	printk("DS1775 digital thermometer [@%02x]\n", cl->addr );
cl                400 drivers/macintosh/therm_windtunnel.c 	x.thermostat = cl;
cl                415 drivers/macintosh/therm_windtunnel.c do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                417 drivers/macintosh/therm_windtunnel.c 	struct i2c_adapter *adapter = cl->adapter;
cl                426 drivers/macintosh/therm_windtunnel.c 		ret = attach_fan(cl);
cl                429 drivers/macintosh/therm_windtunnel.c 		ret = attach_thermostat(cl);
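
therm_windtunnel builds its accessors from raw i2c_master_send()/i2c_master_recv() because the DS1775 and ADM1030 mix 1- and 2-byte registers; attach_fan() and attach_thermostat() then identify the chips by reading known registers (lines 363, 380). A sketch of the read side as read_reg() implements it: write the register index, then read back 1 or 2 big-endian bytes:

#include <linux/i2c.h>

static int raw_read_reg(struct i2c_client *cl, u8 reg, int len)
{
        u8 buf[2];

        if (len != 1 && len != 2)
                return -EINVAL;
        buf[0] = reg;
        if (i2c_master_send(cl, buf, 1) != 1)
                return -ENODEV;
        if (i2c_master_recv(cl, buf, len) != len)
                return -ENODEV;
        return len == 2 ? (buf[0] << 8) | buf[1] : buf[0];
}
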
cl                 74 drivers/mailbox/mailbox.c 	if (chan->cl->tx_prepare)
cl                 75 drivers/mailbox/mailbox.c 		chan->cl->tx_prepare(chan->cl, data);
cl                107 drivers/mailbox/mailbox.c 	if (chan->cl->tx_done)
cl                108 drivers/mailbox/mailbox.c 		chan->cl->tx_done(chan->cl, mssg, r);
cl                110 drivers/mailbox/mailbox.c 	if (r != -ETIME && chan->cl->tx_block)
cl                124 drivers/mailbox/mailbox.c 		if (chan->active_req && chan->cl) {
cl                153 drivers/mailbox/mailbox.c 	if (chan->cl->rx_callback)
cl                154 drivers/mailbox/mailbox.c 		chan->cl->rx_callback(chan->cl, mssg);
cl                252 drivers/mailbox/mailbox.c 	if (!chan || !chan->cl)
cl                263 drivers/mailbox/mailbox.c 	if (chan->cl->tx_block) {
cl                267 drivers/mailbox/mailbox.c 		if (!chan->cl->tx_tout) /* wait forever */
cl                270 drivers/mailbox/mailbox.c 			wait = msecs_to_jiffies(chan->cl->tx_tout);
cl                329 drivers/mailbox/mailbox.c struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
cl                331 drivers/mailbox/mailbox.c 	struct device *dev = cl->dev;
cl                367 drivers/mailbox/mailbox.c 	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
cl                377 drivers/mailbox/mailbox.c 	chan->cl = cl;
cl                380 drivers/mailbox/mailbox.c 	if (chan->txdone_method	== TXDONE_BY_POLL && cl->knows_txdone)
cl                400 drivers/mailbox/mailbox.c struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
cl                403 drivers/mailbox/mailbox.c 	struct device_node *np = cl->dev->of_node;
cl                409 drivers/mailbox/mailbox.c 		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
cl                414 drivers/mailbox/mailbox.c 		dev_err(cl->dev,
cl                421 drivers/mailbox/mailbox.c 			return mbox_request_channel(cl, index);
cl                425 drivers/mailbox/mailbox.c 	dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
cl                440 drivers/mailbox/mailbox.c 	if (!chan || !chan->cl)
cl                448 drivers/mailbox/mailbox.c 	chan->cl = NULL;
cl                506 drivers/mailbox/mailbox.c 		chan->cl = NULL;
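
On the send side, mailbox.c branches on the client flags captured above: with tx_block set, mbox_send_message() waits up to tx_tout ms on the channel's completion (forever when tx_tout is 0, line 267); with knows_txdone set, framework polling is replaced by the client reporting completion itself. A sketch of the latter, assuming the caller has some way of detecting the remote acknowledgement; mbox_client_txdone() is the framework's report-back call:

#include <linux/mailbox_client.h>

static int send_and_ack(struct mbox_chan *chan, void *msg)
{
        int ret = mbox_send_message(chan, msg);

        if (ret < 0)
                return ret;     /* queue full or controller error */

        /* ...client-specific: poll the remote until it acks the message... */
        mbox_client_txdone(chan, 0);    /* 0 = TX completed successfully */
        return 0;
}
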
cl                414 drivers/mailbox/omap-mailbox.c struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
cl                417 drivers/mailbox/omap-mailbox.c 	struct device *dev = cl->dev;
cl                449 drivers/mailbox/omap-mailbox.c 	chan->cl = cl;
cl                230 drivers/mailbox/pcc.c struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
cl                246 drivers/mailbox/pcc.c 	if (IS_ERR(chan) || chan->cl) {
cl                255 drivers/mailbox/pcc.c 	chan->cl = cl;
cl                258 drivers/mailbox/pcc.c 	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
cl                291 drivers/mailbox/pcc.c 	if (!chan || !chan->cl)
cl                303 drivers/mailbox/pcc.c 	chan->cl = NULL;
cl                789 drivers/mailbox/tegra-hsp.c 			if (mb->channel.chan->cl)
cl                250 drivers/md/bcache/bcache.h 	struct closure		cl;
cl                506 drivers/md/bcache/bcache.h 	struct closure		cl;
cl                920 drivers/md/bcache/bcache.h 				      struct closure *cl)
cl                922 drivers/md/bcache/bcache.h 	closure_get(cl);
cl                294 drivers/md/bcache/btree.c 	struct closure *cl = bio->bi_private;
cl                296 drivers/md/bcache/btree.c 	closure_put(cl);
cl                302 drivers/md/bcache/btree.c 	struct closure cl;
cl                307 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl                312 drivers/md/bcache/btree.c 	bio->bi_private	= &cl;
cl                318 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl                352 drivers/md/bcache/btree.c static void btree_node_write_unlock(struct closure *cl)
cl                354 drivers/md/bcache/btree.c 	struct btree *b = container_of(cl, struct btree, io);
cl                359 drivers/md/bcache/btree.c static void __btree_node_write_done(struct closure *cl)
cl                361 drivers/md/bcache/btree.c 	struct btree *b = container_of(cl, struct btree, io);
cl                371 drivers/md/bcache/btree.c 	closure_return_with_destructor(cl, btree_node_write_unlock);
cl                374 drivers/md/bcache/btree.c static void btree_node_write_done(struct closure *cl)
cl                376 drivers/md/bcache/btree.c 	struct btree *b = container_of(cl, struct btree, io);
cl                379 drivers/md/bcache/btree.c 	__btree_node_write_done(cl);
cl                384 drivers/md/bcache/btree.c 	struct closure *cl = bio->bi_private;
cl                385 drivers/md/bcache/btree.c 	struct btree *b = container_of(cl, struct btree, io);
cl                391 drivers/md/bcache/btree.c 	closure_put(cl);
cl                396 drivers/md/bcache/btree.c 	struct closure *cl = &b->io;
cl                407 drivers/md/bcache/btree.c 	b->bio->bi_private	= cl;
cl                443 drivers/md/bcache/btree.c 		continue_at(cl, btree_node_write_done, NULL);
cl                454 drivers/md/bcache/btree.c 		closure_sync(cl);
cl                455 drivers/md/bcache/btree.c 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
cl                477 drivers/md/bcache/btree.c 	closure_init(&b->io, parent ?: &b->c->cl);
cl                510 drivers/md/bcache/btree.c 	struct closure cl;
cl                512 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl                515 drivers/md/bcache/btree.c 	bch_btree_node_write(b, &cl);
cl                518 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl                640 drivers/md/bcache/btree.c 	struct closure cl;
cl                642 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl                682 drivers/md/bcache/btree.c 		__bch_btree_node_write(b, &cl);
cl                685 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl                783 drivers/md/bcache/btree.c 	struct closure cl;
cl                785 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl               1375 drivers/md/bcache/btree.c 	struct closure cl;
cl               1384 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl               1476 drivers/md/bcache/btree.c 		bch_btree_node_write(new_nodes[i], &cl);
cl               1483 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl               1521 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl               2084 drivers/md/bcache/btree.c 	struct closure cl;
cl               2087 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl               2147 drivers/md/bcache/btree.c 		bch_btree_node_write(n2, &cl);
cl               2158 drivers/md/bcache/btree.c 	bch_btree_node_write(n1, &cl);
cl               2166 drivers/md/bcache/btree.c 		bch_btree_node_write(n3, &cl);
cl               2169 drivers/md/bcache/btree.c 		closure_sync(&cl);
cl               2174 drivers/md/bcache/btree.c 		closure_sync(&cl);
cl               2178 drivers/md/bcache/btree.c 		closure_sync(&cl);
cl               2216 drivers/md/bcache/btree.c 	struct closure cl;
cl               2220 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl               2239 drivers/md/bcache/btree.c 			bch_btree_node_write(b, &cl);
cl               2245 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl               2362 drivers/md/bcache/btree.c 	struct closure cl;
cl               2364 drivers/md/bcache/btree.c 	closure_init_stack(&cl);
cl               2379 drivers/md/bcache/btree.c 	bch_journal_meta(b->c, &cl);
cl               2380 drivers/md/bcache/btree.c 	closure_sync(&cl);
cl                 16 drivers/md/bcache/closure.c static inline void closure_put_after_sub(struct closure *cl, int flags)
cl                 24 drivers/md/bcache/closure.c 		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
cl                 25 drivers/md/bcache/closure.c 			atomic_set(&cl->remaining,
cl                 27 drivers/md/bcache/closure.c 			closure_queue(cl);
cl                 29 drivers/md/bcache/closure.c 			struct closure *parent = cl->parent;
cl                 30 drivers/md/bcache/closure.c 			closure_fn *destructor = cl->fn;
cl                 32 drivers/md/bcache/closure.c 			closure_debug_destroy(cl);
cl                 35 drivers/md/bcache/closure.c 				destructor(cl);
cl                 44 drivers/md/bcache/closure.c void closure_sub(struct closure *cl, int v)
cl                 46 drivers/md/bcache/closure.c 	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
cl                 53 drivers/md/bcache/closure.c void closure_put(struct closure *cl)
cl                 55 drivers/md/bcache/closure.c 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
cl                 65 drivers/md/bcache/closure.c 	struct closure *cl, *t;
cl                 74 drivers/md/bcache/closure.c 	llist_for_each_entry_safe(cl, t, reverse, list) {
cl                 75 drivers/md/bcache/closure.c 		closure_set_waiting(cl, 0);
cl                 76 drivers/md/bcache/closure.c 		closure_sub(cl, CLOSURE_WAITING + 1);
cl                 88 drivers/md/bcache/closure.c bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
cl                 90 drivers/md/bcache/closure.c 	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
cl                 93 drivers/md/bcache/closure.c 	closure_set_waiting(cl, _RET_IP_);
cl                 94 drivers/md/bcache/closure.c 	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
cl                 95 drivers/md/bcache/closure.c 	llist_add(&cl->list, &waitlist->list);
cl                106 drivers/md/bcache/closure.c static void closure_sync_fn(struct closure *cl)
cl                108 drivers/md/bcache/closure.c 	struct closure_syncer *s = cl->s;
cl                118 drivers/md/bcache/closure.c void __sched __closure_sync(struct closure *cl)
cl                122 drivers/md/bcache/closure.c 	cl->s = &s;
cl                123 drivers/md/bcache/closure.c 	continue_at(cl, closure_sync_fn, NULL);
cl                141 drivers/md/bcache/closure.c void closure_debug_create(struct closure *cl)
cl                145 drivers/md/bcache/closure.c 	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
cl                146 drivers/md/bcache/closure.c 	cl->magic = CLOSURE_MAGIC_ALIVE;
cl                149 drivers/md/bcache/closure.c 	list_add(&cl->all, &closure_list);
cl                154 drivers/md/bcache/closure.c void closure_debug_destroy(struct closure *cl)
cl                158 drivers/md/bcache/closure.c 	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
cl                159 drivers/md/bcache/closure.c 	cl->magic = CLOSURE_MAGIC_DEAD;
cl                162 drivers/md/bcache/closure.c 	list_del(&cl->all);
cl                171 drivers/md/bcache/closure.c 	struct closure *cl;
cl                175 drivers/md/bcache/closure.c 	list_for_each_entry(cl, &closure_list, all) {
cl                176 drivers/md/bcache/closure.c 		int r = atomic_read(&cl->remaining);
cl                179 drivers/md/bcache/closure.c 			   cl, (void *) cl->ip, cl->fn, cl->parent,
cl                184 drivers/md/bcache/closure.c 				    work_data_bits(&cl->work)) ? "Q" : "",
cl                189 drivers/md/bcache/closure.c 				   (void *) cl->waiting_on);
cl                169 drivers/md/bcache/closure.h void closure_sub(struct closure *cl, int v);
cl                170 drivers/md/bcache/closure.h void closure_put(struct closure *cl);
cl                172 drivers/md/bcache/closure.h bool closure_wait(struct closure_waitlist *list, struct closure *cl);
cl                173 drivers/md/bcache/closure.h void __closure_sync(struct closure *cl);
cl                181 drivers/md/bcache/closure.h static inline void closure_sync(struct closure *cl)
cl                183 drivers/md/bcache/closure.h 	if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
cl                184 drivers/md/bcache/closure.h 		__closure_sync(cl);
cl                190 drivers/md/bcache/closure.h void closure_debug_create(struct closure *cl);
cl                191 drivers/md/bcache/closure.h void closure_debug_destroy(struct closure *cl);
cl                196 drivers/md/bcache/closure.h static inline void closure_debug_create(struct closure *cl) {}
cl                197 drivers/md/bcache/closure.h static inline void closure_debug_destroy(struct closure *cl) {}
cl                201 drivers/md/bcache/closure.h static inline void closure_set_ip(struct closure *cl)
cl                204 drivers/md/bcache/closure.h 	cl->ip = _THIS_IP_;
cl                208 drivers/md/bcache/closure.h static inline void closure_set_ret_ip(struct closure *cl)
cl                211 drivers/md/bcache/closure.h 	cl->ip = _RET_IP_;
cl                215 drivers/md/bcache/closure.h static inline void closure_set_waiting(struct closure *cl, unsigned long f)
cl                218 drivers/md/bcache/closure.h 	cl->waiting_on = f;
cl                222 drivers/md/bcache/closure.h static inline void closure_set_stopped(struct closure *cl)
cl                224 drivers/md/bcache/closure.h 	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
cl                227 drivers/md/bcache/closure.h static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
cl                230 drivers/md/bcache/closure.h 	closure_set_ip(cl);
cl                231 drivers/md/bcache/closure.h 	cl->fn = fn;
cl                232 drivers/md/bcache/closure.h 	cl->wq = wq;
cl                237 drivers/md/bcache/closure.h static inline void closure_queue(struct closure *cl)
cl                239 drivers/md/bcache/closure.h 	struct workqueue_struct *wq = cl->wq;
cl                247 drivers/md/bcache/closure.h 		INIT_WORK(&cl->work, cl->work.func);
cl                248 drivers/md/bcache/closure.h 		BUG_ON(!queue_work(wq, &cl->work));
cl                250 drivers/md/bcache/closure.h 		cl->fn(cl);
cl                256 drivers/md/bcache/closure.h static inline void closure_get(struct closure *cl)
cl                259 drivers/md/bcache/closure.h 	BUG_ON((atomic_inc_return(&cl->remaining) &
cl                262 drivers/md/bcache/closure.h 	atomic_inc(&cl->remaining);
cl                272 drivers/md/bcache/closure.h static inline void closure_init(struct closure *cl, struct closure *parent)
cl                274 drivers/md/bcache/closure.h 	memset(cl, 0, sizeof(struct closure));
cl                275 drivers/md/bcache/closure.h 	cl->parent = parent;
cl                279 drivers/md/bcache/closure.h 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
cl                281 drivers/md/bcache/closure.h 	closure_debug_create(cl);
cl                282 drivers/md/bcache/closure.h 	closure_set_ip(cl);
cl                285 drivers/md/bcache/closure.h static inline void closure_init_stack(struct closure *cl)
cl                287 drivers/md/bcache/closure.h 	memset(cl, 0, sizeof(struct closure));
cl                288 drivers/md/bcache/closure.h 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
cl                370 drivers/md/bcache/closure.h static inline void closure_call(struct closure *cl, closure_fn fn,
cl                374 drivers/md/bcache/closure.h 	closure_init(cl, parent);
cl                375 drivers/md/bcache/closure.h 	continue_at_nobarrier(cl, fn, wq);
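
Taken together with the btree and journal hits earlier, the closure primitives above form bcache's stock "wait for N I/Os" idiom: a stack closure is taken once per submitted bio with closure_get(), each completion drops it with closure_put(), and closure_sync() parks the caller until only the initial reference remains (cf. btree.c lines 294-318). A sketch assuming a modern one-argument submit_bio(); the function names are hypothetical:

#include <linux/bio.h>
#include "closure.h"

static void my_endio(struct bio *bio)
{
        closure_put(bio->bi_private);   /* one completion arrived */
}

static void wait_for_bios(struct bio **bios, unsigned int nr)
{
        struct closure cl;
        unsigned int i;

        closure_init_stack(&cl);

        for (i = 0; i < nr; i++) {
                bios[i]->bi_private = &cl;
                bios[i]->bi_end_io = my_endio;
                closure_get(&cl);       /* taken before each submit */
                submit_bio(bios[i]);
        }

        closure_sync(&cl);              /* sleeps until every put is in */
}
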
cl                169 drivers/md/bcache/io.c 	struct closure *cl = bio->bi_private;
cl                173 drivers/md/bcache/io.c 	closure_put(cl);
cl                 30 drivers/md/bcache/journal.c 	struct closure *cl = bio->bi_private;
cl                 32 drivers/md/bcache/journal.c 	closure_put(cl);
cl                 43 drivers/md/bcache/journal.c 	struct closure cl;
cl                 48 drivers/md/bcache/journal.c 	closure_init_stack(&cl);
cl                 62 drivers/md/bcache/journal.c 		bio->bi_private = &cl;
cl                 66 drivers/md/bcache/journal.c 		closure_bio_submit(ca->set, bio, &cl);
cl                 67 drivers/md/bcache/journal.c 		closure_sync(&cl);
cl                587 drivers/md/bcache/journal.c 	closure_put(&ca->set->cl);
cl                633 drivers/md/bcache/journal.c 		closure_get(&ca->set->cl);
cl                733 drivers/md/bcache/journal.c static void journal_write(struct closure *cl);
cl                735 drivers/md/bcache/journal.c static void journal_write_done(struct closure *cl)
cl                737 drivers/md/bcache/journal.c 	struct journal *j = container_of(cl, struct journal, io);
cl                743 drivers/md/bcache/journal.c 	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
cl                746 drivers/md/bcache/journal.c static void journal_write_unlock(struct closure *cl)
cl                749 drivers/md/bcache/journal.c 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
cl                755 drivers/md/bcache/journal.c static void journal_write_unlocked(struct closure *cl)
cl                758 drivers/md/bcache/journal.c 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
cl                771 drivers/md/bcache/journal.c 		closure_return_with_destructor(cl, journal_write_unlock);
cl                778 drivers/md/bcache/journal.c 		continue_at(cl, journal_write, bch_journal_wq);
cl                832 drivers/md/bcache/journal.c 		closure_bio_submit(c, bio, cl);
cl                834 drivers/md/bcache/journal.c 	continue_at(cl, journal_write_done, NULL);
cl                837 drivers/md/bcache/journal.c static void journal_write(struct closure *cl)
cl                839 drivers/md/bcache/journal.c 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
cl                842 drivers/md/bcache/journal.c 	journal_write_unlocked(cl);
cl                848 drivers/md/bcache/journal.c 	struct closure *cl = &c->journal.io;
cl                855 drivers/md/bcache/journal.c 		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
cl                866 drivers/md/bcache/journal.c 	struct closure cl;
cl                869 drivers/md/bcache/journal.c 	closure_init_stack(&cl);
cl                885 drivers/md/bcache/journal.c 			closure_wait(&c->journal.wait, &cl);
cl                910 drivers/md/bcache/journal.c 		closure_sync(&cl);
cl                972 drivers/md/bcache/journal.c void bch_journal_meta(struct cache_set *c, struct closure *cl)
cl                979 drivers/md/bcache/journal.c 	ref = bch_journal(c, &keys, cl);
cl                179 drivers/md/bcache/journal.h void bch_journal_meta(struct cache_set *c, struct closure *cl);
cl                 16 drivers/md/bcache/movinggc.c 	struct closure		cl;
cl                 38 drivers/md/bcache/movinggc.c static void moving_io_destructor(struct closure *cl)
cl                 40 drivers/md/bcache/movinggc.c 	struct moving_io *io = container_of(cl, struct moving_io, cl);
cl                 45 drivers/md/bcache/movinggc.c static void write_moving_finish(struct closure *cl)
cl                 47 drivers/md/bcache/movinggc.c 	struct moving_io *io = container_of(cl, struct moving_io, cl);
cl                 59 drivers/md/bcache/movinggc.c 	closure_return_with_destructor(cl, moving_io_destructor);
cl                 66 drivers/md/bcache/movinggc.c 					    struct moving_io, cl);
cl                 88 drivers/md/bcache/movinggc.c 	bio->bi_private		= &io->cl;
cl                 92 drivers/md/bcache/movinggc.c static void write_moving(struct closure *cl)
cl                 94 drivers/md/bcache/movinggc.c 	struct moving_io *io = container_of(cl, struct moving_io, cl);
cl                110 drivers/md/bcache/movinggc.c 		closure_call(&op->cl, bch_data_insert, NULL, cl);
cl                113 drivers/md/bcache/movinggc.c 	continue_at(cl, write_moving_finish, op->wq);
cl                116 drivers/md/bcache/movinggc.c static void read_moving_submit(struct closure *cl)
cl                118 drivers/md/bcache/movinggc.c 	struct moving_io *io = container_of(cl, struct moving_io, cl);
cl                123 drivers/md/bcache/movinggc.c 	continue_at(cl, write_moving, io->op.wq);
cl                131 drivers/md/bcache/movinggc.c 	struct closure cl;
cl                133 drivers/md/bcache/movinggc.c 	closure_init_stack(&cl);
cl                172 drivers/md/bcache/movinggc.c 		closure_call(&io->cl, read_moving_submit, NULL, &cl);
cl                182 drivers/md/bcache/movinggc.c 	closure_sync(&cl);
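
movinggc shows the chained form: each moving_io embeds a closure, closure_call() (line 172) launches it with the thread's stack closure as parent, each stage ends in continue_at() to run the next stage from a workqueue, and the final closure_return_with_destructor() (line 59) frees the io and releases the parent so the closure_sync() at line 182 can complete. A compressed two-stage sketch with hypothetical names:

#include <linux/slab.h>
#include "closure.h"

struct my_io {
        struct closure          cl;     /* embedded, as in struct moving_io */
        struct workqueue_struct *wq;
        /* ...payload... */
};

static void my_io_destructor(struct closure *cl)
{
        kfree(container_of(cl, struct my_io, cl));
}

static void my_stage2(struct closure *cl)
{
        /* ...second half of the work... */
        closure_return_with_destructor(cl, my_io_destructor);
}

static void my_stage1(struct closure *cl)
{
        struct my_io *io = container_of(cl, struct my_io, cl);

        /* ...start async work, then run stage 2 from the workqueue... */
        continue_at(cl, my_stage2, io->wq);
}

/* Caller: parent is a stack closure; closure_sync(parent) waits for all IOs. */
static void run_one(struct my_io *io, struct closure *parent)
{
        closure_call(&io->cl, my_stage1, NULL, parent);
}
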
cl                 28 drivers/md/bcache/request.c static void bch_data_insert_start(struct closure *cl);
cl                 58 drivers/md/bcache/request.c static void bch_data_insert_keys(struct closure *cl)
cl                 60 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                 73 drivers/md/bcache/request.c 	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
cl                 74 drivers/md/bcache/request.c 		closure_sync(&s->cl);
cl                 79 drivers/md/bcache/request.c 					  op->flush_journal ? cl : NULL);
cl                 94 drivers/md/bcache/request.c 		continue_at(cl, bch_data_insert_start, op->wq);
cl                 99 drivers/md/bcache/request.c 	closure_return(cl);
cl                120 drivers/md/bcache/request.c static void bch_data_invalidate(struct closure *cl)
cl                122 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                148 drivers/md/bcache/request.c 	continue_at(cl, bch_data_insert_keys, op->wq);
cl                151 drivers/md/bcache/request.c static void bch_data_insert_error(struct closure *cl)
cl                153 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                178 drivers/md/bcache/request.c 	bch_data_insert_keys(cl);
cl                183 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
cl                184 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                191 drivers/md/bcache/request.c 			set_closure_fn(cl, bch_data_insert_error, op->wq);
cl                193 drivers/md/bcache/request.c 			set_closure_fn(cl, NULL, NULL);
cl                199 drivers/md/bcache/request.c static void bch_data_insert_start(struct closure *cl)
cl                201 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                205 drivers/md/bcache/request.c 		return bch_data_invalidate(cl);
cl                225 drivers/md/bcache/request.c 			continue_at(cl, bch_data_insert_keys, op->wq);
cl                242 drivers/md/bcache/request.c 		n->bi_private	= cl;
cl                264 drivers/md/bcache/request.c 	continue_at(cl, bch_data_insert_keys, op->wq);
cl                284 drivers/md/bcache/request.c 		return bch_data_invalidate(cl);
cl                294 drivers/md/bcache/request.c 			continue_at(cl, bch_data_insert_keys, op->wq);
cl                296 drivers/md/bcache/request.c 			closure_return(cl);
cl                320 drivers/md/bcache/request.c void bch_data_insert(struct closure *cl)
cl                322 drivers/md/bcache/request.c 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
cl                329 drivers/md/bcache/request.c 	bch_data_insert_start(cl);
cl                477 drivers/md/bcache/request.c 	struct closure		cl;
cl                499 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
cl                500 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                572 drivers/md/bcache/request.c 	n->bi_private	= &s->cl;
cl                589 drivers/md/bcache/request.c static void cache_lookup(struct closure *cl)
cl                591 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, iop.cl);
cl                602 drivers/md/bcache/request.c 		continue_at(cl, cache_lookup, bcache_wq);
cl                627 drivers/md/bcache/request.c 	closure_return(cl);
cl                634 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
cl                637 drivers/md/bcache/request.c 		struct search *s = container_of(cl, struct search, cl);
cl                645 drivers/md/bcache/request.c 	closure_put(cl);
cl                650 drivers/md/bcache/request.c 	struct closure *cl = bio->bi_private;
cl                653 drivers/md/bcache/request.c 		struct search *s = container_of(cl, struct search, cl);
cl                677 drivers/md/bcache/request.c 	closure_put(cl);
cl                708 drivers/md/bcache/request.c 	bio->bi_private		= &s->cl;
cl                713 drivers/md/bcache/request.c static void search_free(struct closure *cl)
cl                715 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                723 drivers/md/bcache/request.c 	closure_debug_destroy(cl);
cl                734 drivers/md/bcache/request.c 	closure_init(&s->cl, NULL);
cl                762 drivers/md/bcache/request.c static void cached_dev_bio_complete(struct closure *cl)
cl                764 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                768 drivers/md/bcache/request.c 	search_free(cl);
cl                773 drivers/md/bcache/request.c static void cached_dev_read_error_done(struct closure *cl)
cl                775 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                783 drivers/md/bcache/request.c 	cached_dev_bio_complete(cl);
cl                786 drivers/md/bcache/request.c static void cached_dev_read_error(struct closure *cl)
cl                788 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                808 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
cl                811 drivers/md/bcache/request.c 	continue_at(cl, cached_dev_read_error_done, NULL);
cl                814 drivers/md/bcache/request.c static void cached_dev_cache_miss_done(struct closure *cl)
cl                816 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                825 drivers/md/bcache/request.c 	cached_dev_bio_complete(cl);
cl                826 drivers/md/bcache/request.c 	closure_put(&d->cl);
cl                829 drivers/md/bcache/request.c static void cached_dev_read_done(struct closure *cl)
cl                831 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                859 drivers/md/bcache/request.c 	closure_get(&dc->disk.cl);
cl                865 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
cl                868 drivers/md/bcache/request.c 	continue_at(cl, cached_dev_cache_miss_done, NULL);
cl                871 drivers/md/bcache/request.c static void cached_dev_read_done_bh(struct closure *cl)
cl                873 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                881 drivers/md/bcache/request.c 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
cl                883 drivers/md/bcache/request.c 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
cl                885 drivers/md/bcache/request.c 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
cl                938 drivers/md/bcache/request.c 	cache_bio->bi_private	= &s->cl;
cl                951 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, cache_bio, &s->cl);
cl                958 drivers/md/bcache/request.c 	miss->bi_private	= &s->cl;
cl                960 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, miss, &s->cl);
cl                966 drivers/md/bcache/request.c 	struct closure *cl = &s->cl;
cl                968 drivers/md/bcache/request.c 	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
cl                969 drivers/md/bcache/request.c 	continue_at(cl, cached_dev_read_done_bh, NULL);
cl                974 drivers/md/bcache/request.c static void cached_dev_write_complete(struct closure *cl)
cl                976 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl                980 drivers/md/bcache/request.c 	cached_dev_bio_complete(cl);
cl                985 drivers/md/bcache/request.c 	struct closure *cl = &s->cl;
cl               1029 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
cl               1050 drivers/md/bcache/request.c 			flush->bi_private = cl;
cl               1053 drivers/md/bcache/request.c 			closure_bio_submit(s->iop.c, flush, cl);
cl               1059 drivers/md/bcache/request.c 		closure_bio_submit(s->iop.c, bio, cl);
cl               1063 drivers/md/bcache/request.c 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
cl               1064 drivers/md/bcache/request.c 	continue_at(cl, cached_dev_write_complete, NULL);
cl               1067 drivers/md/bcache/request.c static void cached_dev_nodata(struct closure *cl)
cl               1069 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl               1073 drivers/md/bcache/request.c 		bch_journal_meta(s->iop.c, cl);
cl               1077 drivers/md/bcache/request.c 	closure_bio_submit(s->iop.c, bio, cl);
cl               1079 drivers/md/bcache/request.c 	continue_at(cl, cached_dev_bio_complete, NULL);
cl               1223 drivers/md/bcache/request.c 			continue_at_nobarrier(&s->cl,
cl               1306 drivers/md/bcache/request.c static void flash_dev_nodata(struct closure *cl)
cl               1308 drivers/md/bcache/request.c 	struct search *s = container_of(cl, struct search, cl);
cl               1311 drivers/md/bcache/request.c 		bch_journal_meta(s->iop.c, cl);
cl               1313 drivers/md/bcache/request.c 	continue_at(cl, search_free, NULL);
cl               1320 drivers/md/bcache/request.c 	struct closure *cl;
cl               1332 drivers/md/bcache/request.c 	cl = &s->cl;
cl               1342 drivers/md/bcache/request.c 		continue_at_nobarrier(&s->cl,
cl               1355 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
cl               1357 drivers/md/bcache/request.c 		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
cl               1360 drivers/md/bcache/request.c 	continue_at(cl, search_free, NULL);
cl                  6 drivers/md/bcache/request.h 	struct closure		cl;
cl                 37 drivers/md/bcache/request.h void bch_data_insert(struct closure *cl);
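
Nearly every request.c callback above opens the same way: the function receives only a struct closure * and recovers the surrounding operation with container_of(), which works because the closure is embedded as a named member (request.h line 6). The idiom in isolation, with a hypothetical operation type:

#include <linux/kernel.h>
#include "closure.h"

struct my_op {
        struct closure  cl;     /* must be the member named in container_of() */
        int             status;
};

static void my_op_fn(struct closure *cl)
{
        /* cl points at my_op.cl, so this recovers the whole operation */
        struct my_op *op = container_of(cl, struct my_op, cl);

        op->status = 0;
        closure_return(cl);     /* drop the ref taken by closure_call()/init */
}
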
cl                130 drivers/md/bcache/stats.c 		closure_return(&acc->cl);
cl                185 drivers/md/bcache/stats.c 		closure_return(&acc->cl);
cl                243 drivers/md/bcache/stats.c 	closure_init(&acc->cl, parent);
cl                 30 drivers/md/bcache/stats.h 	struct closure		cl;
cl                244 drivers/md/bcache/super.c static void bch_write_bdev_super_unlock(struct closure *cl)
cl                246 drivers/md/bcache/super.c 	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
cl                253 drivers/md/bcache/super.c 	struct closure *cl = &dc->sb_write;
cl                257 drivers/md/bcache/super.c 	closure_init(cl, parent);
cl                264 drivers/md/bcache/super.c 	closure_get(cl);
cl                268 drivers/md/bcache/super.c 	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
cl                281 drivers/md/bcache/super.c static void bcache_write_super_unlock(struct closure *cl)
cl                283 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, sb_write);
cl                290 drivers/md/bcache/super.c 	struct closure *cl = &c->sb_write;
cl                295 drivers/md/bcache/super.c 	closure_init(cl, &c->cl);
cl                313 drivers/md/bcache/super.c 		closure_get(cl);
cl                317 drivers/md/bcache/super.c 	closure_return_with_destructor(cl, bcache_write_super_unlock);
cl                324 drivers/md/bcache/super.c 	struct closure *cl = bio->bi_private;
cl                325 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
cl                329 drivers/md/bcache/super.c 	closure_put(cl);
cl                332 drivers/md/bcache/super.c static void uuid_io_unlock(struct closure *cl)
cl                334 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
cl                342 drivers/md/bcache/super.c 	struct closure *cl = &c->uuid_write;
cl                349 drivers/md/bcache/super.c 	closure_init(cl, parent);
cl                358 drivers/md/bcache/super.c 		bio->bi_private = cl;
cl                377 drivers/md/bcache/super.c 	closure_return_with_destructor(cl, uuid_io_unlock);
cl                380 drivers/md/bcache/super.c static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
cl                388 drivers/md/bcache/super.c 	uuid_io(c, REQ_OP_READ, 0, k, cl);
cl                395 drivers/md/bcache/super.c 		closure_sync(cl);
cl                424 drivers/md/bcache/super.c 	struct closure cl;
cl                427 drivers/md/bcache/super.c 	closure_init_stack(&cl);
cl                434 drivers/md/bcache/super.c 	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
cl                435 drivers/md/bcache/super.c 	closure_sync(&cl);
cl                514 drivers/md/bcache/super.c 	struct closure *cl = &ca->prio;
cl                517 drivers/md/bcache/super.c 	closure_init_stack(cl);
cl                529 drivers/md/bcache/super.c 	closure_sync(cl);
cl                536 drivers/md/bcache/super.c 	struct closure cl;
cl                555 drivers/md/bcache/super.c 	closure_init_stack(&cl);
cl                594 drivers/md/bcache/super.c 	bch_journal_meta(ca->set, &cl);
cl                595 drivers/md/bcache/super.c 	closure_sync(&cl);
cl                655 drivers/md/bcache/super.c 	closure_get(&d->cl);
cl                663 drivers/md/bcache/super.c 	closure_put(&d->cl);
cl                689 drivers/md/bcache/super.c 		closure_queue(&d->cl);
cl                807 drivers/md/bcache/super.c 	closure_debug_destroy(&d->cl);
cl                974 drivers/md/bcache/super.c 		struct closure cl;
cl                976 drivers/md/bcache/super.c 		closure_init_stack(&cl);
cl                979 drivers/md/bcache/super.c 		bch_write_bdev_super(dc, &cl);
cl                980 drivers/md/bcache/super.c 		closure_sync(&cl);
cl               1040 drivers/md/bcache/super.c 	struct closure cl;
cl               1042 drivers/md/bcache/super.c 	closure_init_stack(&cl);
cl               1059 drivers/md/bcache/super.c 	bch_write_bdev_super(dc, &cl);
cl               1060 drivers/md/bcache/super.c 	closure_sync(&cl);
cl               1076 drivers/md/bcache/super.c 	closure_put(&dc->disk.cl);
cl               1093 drivers/md/bcache/super.c 	closure_get(&dc->disk.cl);
cl               1172 drivers/md/bcache/super.c 		struct closure cl;
cl               1174 drivers/md/bcache/super.c 		closure_init_stack(&cl);
cl               1184 drivers/md/bcache/super.c 		bch_write_bdev_super(dc, &cl);
cl               1185 drivers/md/bcache/super.c 		closure_sync(&cl);
cl               1256 drivers/md/bcache/super.c static void cached_dev_free(struct closure *cl)
cl               1258 drivers/md/bcache/super.c 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cl               1288 drivers/md/bcache/super.c static void cached_dev_flush(struct closure *cl)
cl               1290 drivers/md/bcache/super.c 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cl               1300 drivers/md/bcache/super.c 	continue_at(cl, cached_dev_free, system_wq);
cl               1311 drivers/md/bcache/super.c 	closure_init(&dc->disk.cl, NULL);
cl               1312 drivers/md/bcache/super.c 	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
cl               1318 drivers/md/bcache/super.c 	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
cl               1415 drivers/md/bcache/super.c static void flash_dev_free(struct closure *cl)
cl               1417 drivers/md/bcache/super.c 	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
cl               1427 drivers/md/bcache/super.c static void flash_dev_flush(struct closure *cl)
cl               1429 drivers/md/bcache/super.c 	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
cl               1435 drivers/md/bcache/super.c 	continue_at(cl, flash_dev_free, system_wq);
cl               1445 drivers/md/bcache/super.c 	closure_init(&d->cl, NULL);
cl               1446 drivers/md/bcache/super.c 	set_closure_fn(&d->cl, flash_dev_flush, system_wq);
cl               1570 drivers/md/bcache/super.c static void cache_set_free(struct closure *cl)
cl               1572 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, cl);
cl               1607 drivers/md/bcache/super.c 	closure_debug_destroy(&c->cl);
cl               1611 drivers/md/bcache/super.c static void cache_set_flush(struct closure *cl)
cl               1613 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, caching);
cl               1651 drivers/md/bcache/super.c 	closure_return(cl);
cl               1710 drivers/md/bcache/super.c static void __cache_set_unregister(struct closure *cl)
cl               1712 drivers/md/bcache/super.c 	struct cache_set *c = container_of(cl, struct cache_set, caching);
cl               1737 drivers/md/bcache/super.c 	continue_at(cl, cache_set_flush, system_wq);
cl               1765 drivers/md/bcache/super.c 	closure_init(&c->cl, NULL);
cl               1766 drivers/md/bcache/super.c 	set_closure_fn(&c->cl, cache_set_free, system_wq);
cl               1768 drivers/md/bcache/super.c 	closure_init(&c->caching, &c->cl);
cl               1772 drivers/md/bcache/super.c 	closure_set_stopped(&c->cl);
cl               1773 drivers/md/bcache/super.c 	closure_put(&c->cl);
cl               1778 drivers/md/bcache/super.c 	bch_cache_accounting_init(&c->accounting, &c->cl);
cl               1851 drivers/md/bcache/super.c 	struct closure cl;
cl               1856 drivers/md/bcache/super.c 	closure_init_stack(&cl);
cl               1904 drivers/md/bcache/super.c 		err = uuid_read(c, j, &cl);
cl               1997 drivers/md/bcache/super.c 		bch_btree_node_write(c->root, &cl);
cl               2011 drivers/md/bcache/super.c 		bch_journal_meta(c, &cl);
cl               2018 drivers/md/bcache/super.c 	closure_sync(&cl);
cl               2036 drivers/md/bcache/super.c 	closure_sync(&cl);
cl                242 drivers/md/bcache/writeback.c 	struct closure		cl;
cl                263 drivers/md/bcache/writeback.c static void dirty_io_destructor(struct closure *cl)
cl                265 drivers/md/bcache/writeback.c 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
cl                270 drivers/md/bcache/writeback.c static void write_dirty_finish(struct closure *cl)
cl                272 drivers/md/bcache/writeback.c 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
cl                306 drivers/md/bcache/writeback.c 	closure_return_with_destructor(cl, dirty_io_destructor);
cl                319 drivers/md/bcache/writeback.c 	closure_put(&io->cl);
cl                322 drivers/md/bcache/writeback.c static void write_dirty(struct closure *cl)
cl                324 drivers/md/bcache/writeback.c 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
cl                332 drivers/md/bcache/writeback.c 		closure_wait(&dc->writeback_ordering_wait, cl);
cl                342 drivers/md/bcache/writeback.c 		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
cl                362 drivers/md/bcache/writeback.c 		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
cl                368 drivers/md/bcache/writeback.c 	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
cl                384 drivers/md/bcache/writeback.c static void read_dirty_submit(struct closure *cl)
cl                386 drivers/md/bcache/writeback.c 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
cl                388 drivers/md/bcache/writeback.c 	closure_bio_submit(io->dc->disk.c, &io->bio, cl);
cl                390 drivers/md/bcache/writeback.c 	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
cl                400 drivers/md/bcache/writeback.c 	struct closure cl;
cl                405 drivers/md/bcache/writeback.c 	closure_init_stack(&cl);
cl                489 drivers/md/bcache/writeback.c 			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
cl                513 drivers/md/bcache/writeback.c 	closure_sync(&cl);
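
The bcache hits above all orbit one pattern: a struct closure embedded in a larger object is set up with closure_init()/closure_init_stack(), handed to asynchronous work via closure_call() or closure_bio_submit(), chained to the next stage with continue_at(), and waited on with closure_sync(). Below is a minimal sketch of the on-stack parent idiom visible in read_dirty() in writeback.c above; my_io, my_submit, my_finish and my_wait_for_io are hypothetical names, and the sketch compiles only against the in-tree bcache closure header:

	/* Sketch, not bcache code: models the read_dirty()/read_dirty_submit() shape. */
	struct my_io {
		struct closure	cl;	/* embedded; recovered via container_of() */
	};

	static void my_finish(struct closure *cl)
	{
		struct my_io *io = container_of(cl, struct my_io, cl);

		/* ... consume io ... */
		closure_return(cl);	/* drop our ref, wake the parent */
	}

	static void my_submit(struct closure *cl)
	{
		/* ... kick off async I/O that holds a ref on cl ... */
		continue_at(cl, my_finish, system_wq);	/* next stage runs later */
	}

	static void my_wait_for_io(struct my_io *io)
	{
		struct closure cl;	/* on-stack parent closure */

		closure_init_stack(&cl);
		closure_call(&io->cl, my_submit, NULL, &cl);	/* &cl is the parent */
		closure_sync(&cl);	/* block until io->cl's refs drop */
	}
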
cl                191 drivers/media/pci/pt1/pt1.c static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk)
cl                198 drivers/media/pci/pt1/pt1.c 	is_sat = !strncmp(cl->name, TC90522_I2C_DEV_SAT,
cl                205 drivers/media/pci/pt1/pt1.c 		msg[0].addr = cl->addr;
cl                210 drivers/media/pci/pt1/pt1.c 		msg[1].addr = cl->addr;
cl                214 drivers/media/pci/pt1/pt1.c 		ret = i2c_transfer(cl->adapter, msg, 2);
cl                241 drivers/media/pci/pt1/pt1.c 		ret = i2c_master_send(cl, cfg_data[i], 2);
cl                256 drivers/media/pci/pt1/pt1.c 	struct i2c_client *cl;
cl                263 drivers/media/pci/pt1/pt1.c 		cl = pt1->adaps[i]->demod_i2c_client;
cl                264 drivers/media/pci/pt1/pt1.c 		if (strncmp(cl->name, TC90522_I2C_DEV_TER,
cl                268 drivers/media/pci/pt1/pt1.c 		ret = i2c_master_send(cl, buf, 2);
cl                275 drivers/media/pci/pt1/pt1.c 		cl = pt1->adaps[i]->demod_i2c_client;
cl                276 drivers/media/pci/pt1/pt1.c 		if (strncmp(cl->name, TC90522_I2C_DEV_SAT,
cl                280 drivers/media/pci/pt1/pt1.c 		ret = i2c_master_send(cl, buf, 2);
cl                973 drivers/media/pci/pt1/pt1.c 		struct i2c_client *cl;
cl                980 drivers/media/pci/pt1/pt1.c 		cl = dvb_module_probe("tc90522", info->type, &pt1->i2c_adap,
cl                982 drivers/media/pci/pt1/pt1.c 		if (!cl)
cl                984 drivers/media/pci/pt1/pt1.c 		pt1->adaps[i]->demod_i2c_client = cl;
cl                986 drivers/media/pci/pt1/pt1.c 		if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
cl                993 drivers/media/pci/pt1/pt1.c 			cl = dvb_module_probe("qm1d1b0004",
cl               1002 drivers/media/pci/pt1/pt1.c 			cl = dvb_module_probe("dvb_pll",
cl               1006 drivers/media/pci/pt1/pt1.c 		if (!cl)
cl               1008 drivers/media/pci/pt1/pt1.c 		pt1->adaps[i]->tuner_i2c_client = cl;
cl                372 drivers/media/pci/pt3/pt3.c 	struct i2c_client *cl;
cl                381 drivers/media/pci/pt3/pt3.c 	cl = dvb_module_probe("tc90522", info->type, &pt3->i2c_adap,
cl                383 drivers/media/pci/pt3/pt3.c 	if (!cl)
cl                385 drivers/media/pci/pt3/pt3.c 	pt3->adaps[i]->i2c_demod = cl;
cl                387 drivers/media/pci/pt3/pt3.c 	if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
cl                394 drivers/media/pci/pt3/pt3.c 		cl = dvb_module_probe("qm1d1c0042", info->type, cfg.tuner_i2c,
cl                402 drivers/media/pci/pt3/pt3.c 		cl = dvb_module_probe("mxl301rf", info->type, cfg.tuner_i2c,
cl                405 drivers/media/pci/pt3/pt3.c 	if (!cl)
cl                407 drivers/media/pci/pt3/pt3.c 	pt3->adaps[i]->i2c_tuner = cl;
cl                581 drivers/media/pci/saa7134/saa7134-video.c 			struct cliplist *cl, int entries, char *name)
cl                587 drivers/media/pci/saa7134/saa7134-video.c 		winbits |= cl[i].enable;
cl                588 drivers/media/pci/saa7134/saa7134-video.c 		winbits &= ~cl[i].disable;
cl                589 drivers/media/pci/saa7134/saa7134-video.c 		if (i < 15 && cl[i].position == cl[i+1].position)
cl                592 drivers/media/pci/saa7134/saa7134-video.c 		saa_writeb(reg + 2, cl[i].position & 0xff);
cl                593 drivers/media/pci/saa7134/saa7134-video.c 		saa_writeb(reg + 3, cl[i].position >> 8);
cl                595 drivers/media/pci/saa7134/saa7134-video.c 			name,winbits,cl[i].position);
cl                949 drivers/media/pci/ttpci/av7110_hw.c 	u16 ch, cl;
cl                953 drivers/media/pci/ttpci/av7110_hw.c 	cl = (yuv & 0xffff);
cl                956 drivers/media/pci/ttpci/av7110_hw.c 			color, ch, cl);
cl                490 drivers/media/usb/dvb-usb-v2/gl861.c 	struct i2c_client *cl;
cl                495 drivers/media/usb/dvb-usb-v2/gl861.c 	cl = dvb_module_probe("tc90522", info->type,
cl                497 drivers/media/usb/dvb-usb-v2/gl861.c 	if (!cl)
cl                503 drivers/media/usb/dvb-usb-v2/gl861.c 	priv->i2c_client_demod = cl;
cl                528 drivers/media/usb/dvb-usb-v2/gl861.c 	struct i2c_client *cl;
cl                536 drivers/media/usb/dvb-usb-v2/gl861.c 	cl = dvb_module_probe("dvb_pll", info->type,
cl                538 drivers/media/usb/dvb-usb-v2/gl861.c 	if (!cl)
cl                540 drivers/media/usb/dvb-usb-v2/gl861.c 	priv->i2c_client_tuner = cl;
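
The pt1, pt3 and gl861 hits above share one frontend-attach idiom: dvb_module_probe() returns the demodulator's i2c_client (NULL on failure), the client name is matched against TC90522_I2C_DEV_SAT to choose between the satellite and terrestrial tuner drivers, and both client pointers are stored for the release path. Condensed from the pt3.c lines above; the arguments past the adapter (demod_addr, tuner_addr, tuner_cfg) are illustrative names, since the index truncates them:

	struct i2c_client *cl;

	cl = dvb_module_probe("tc90522", info->type, &pt3->i2c_adap,
			      demod_addr, &cfg);		/* demod first */
	if (!cl)
		return -ENODEV;
	pt3->adaps[i]->i2c_demod = cl;

	if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
		     strlen(TC90522_I2C_DEV_SAT)))
		cl = dvb_module_probe("qm1d1c0042", info->type, cfg.tuner_i2c,
				      tuner_addr, &tuner_cfg);	/* ISDB-S tuner */
	else
		cl = dvb_module_probe("mxl301rf", info->type, cfg.tuner_i2c,
				      tuner_addr, &tuner_cfg);	/* ISDB-T tuner */
	if (!cl)
		return -ENODEV;
	pt3->adaps[i]->i2c_tuner = cl;
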
cl                245 drivers/memory/emif.c 	u32		cl;
cl                248 drivers/memory/emif.c 	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;
cl                250 drivers/memory/emif.c 	return cl;
cl                692 drivers/memory/emif.c static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl)
cl                706 drivers/memory/emif.c 	phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS,
cl               1617 drivers/memory/emif.c 	u32				cl, type;
cl               1659 drivers/memory/emif.c 	cl = get_cl(emif);
cl               1663 drivers/memory/emif.c 			timings, freq, cl);
cl               1665 drivers/memory/emif.c 		regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl);
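
get_cl() in emif.c above is plain read-mask-shift field extraction from the EMIF_SDRAM_CONFIG register. A self-contained userspace illustration of the same arithmetic; the CL_SHIFT/CL_MASK values here are made up for the demo (the real ones live in the EMIF driver headers):

	#include <stdint.h>
	#include <stdio.h>

	#define CL_SHIFT 10			/* hypothetical demo values */
	#define CL_MASK  (0xfu << CL_SHIFT)

	static uint32_t get_cl(uint32_t sdram_config)	/* models the readl() result */
	{
		return (sdram_config & CL_MASK) >> CL_SHIFT;
	}

	int main(void)
	{
		uint32_t reg = 6u << CL_SHIFT;	/* CAS latency 6 encoded in the field */

		printf("cl = %u\n", (unsigned)get_cl(reg));	/* prints: cl = 6 */
		return 0;
	}
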
cl                105 drivers/mfd/lp3943.c static int lp3943_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                108 drivers/mfd/lp3943.c 	struct device *dev = &cl->dev;
cl                114 drivers/mfd/lp3943.c 	lp3943->regmap = devm_regmap_init_i2c(cl, &lp3943_regmap_config);
cl                121 drivers/mfd/lp3943.c 	i2c_set_clientdata(cl, lp3943);
cl                169 drivers/mfd/lp8788.c static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                172 drivers/mfd/lp8788.c 	struct lp8788_platform_data *pdata = dev_get_platdata(&cl->dev);
cl                175 drivers/mfd/lp8788.c 	lp = devm_kzalloc(&cl->dev, sizeof(struct lp8788), GFP_KERNEL);
cl                179 drivers/mfd/lp8788.c 	lp->regmap = devm_regmap_init_i2c(cl, &lp8788_regmap_config);
cl                182 drivers/mfd/lp8788.c 		dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
cl                187 drivers/mfd/lp8788.c 	lp->dev = &cl->dev;
cl                188 drivers/mfd/lp8788.c 	i2c_set_clientdata(cl, lp);
cl                194 drivers/mfd/lp8788.c 	ret = lp8788_irq_init(lp, cl->irq);
cl                202 drivers/mfd/lp8788.c static int lp8788_remove(struct i2c_client *cl)
cl                204 drivers/mfd/lp8788.c 	struct lp8788 *lp = i2c_get_clientdata(cl);
cl                136 drivers/mfd/ti-lmu.c static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                138 drivers/mfd/ti-lmu.c 	struct device *dev = &cl->dev;
cl                156 drivers/mfd/ti-lmu.c 	lmu->dev = &cl->dev;
cl                165 drivers/mfd/ti-lmu.c 	lmu->regmap = devm_regmap_init_i2c(cl, &regmap_cfg);
cl                192 drivers/mfd/ti-lmu.c 	i2c_set_clientdata(cl, lmu);
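
The lp3943, lp8788 and ti-lmu hits above follow the standard I2C MFD probe shape: devm-allocate the driver state, wrap the client in a regmap with devm_regmap_init_i2c(), record the struct device, and attach the state with i2c_set_clientdata() so later callbacks can fetch it back. A sketch with hypothetical struct foo and foo_regmap_config names:

	struct foo {				/* hypothetical driver state */
		struct device	*dev;
		struct regmap	*regmap;
	};

	static int foo_probe(struct i2c_client *cl, const struct i2c_device_id *id)
	{
		struct foo *foo;

		foo = devm_kzalloc(&cl->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->regmap = devm_regmap_init_i2c(cl, &foo_regmap_config);
		if (IS_ERR(foo->regmap)) {
			dev_err(&cl->dev, "regmap init i2c err: %ld\n",
				PTR_ERR(foo->regmap));
			return PTR_ERR(foo->regmap);
		}

		foo->dev = &cl->dev;
		i2c_set_clientdata(cl, foo);	/* i2c_get_clientdata() in remove() */
		return 0;
	}
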
cl                157 drivers/misc/mei/bus-fixup.c 	return __mei_cl_send(cldev->cl, buf, size, mode);
cl                178 drivers/misc/mei/bus-fixup.c 	ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
cl                186 drivers/misc/mei/bus-fixup.c 	bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0,
cl                322 drivers/misc/mei/bus-fixup.c static int mei_nfc_if_version(struct mei_cl *cl,
cl                335 drivers/misc/mei/bus-fixup.c 	bus = cl->dev;
cl                339 drivers/misc/mei/bus-fixup.c 	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
cl                355 drivers/misc/mei/bus-fixup.c 	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
cl                405 drivers/misc/mei/bus-fixup.c 	struct mei_cl *cl;
cl                417 drivers/misc/mei/bus-fixup.c 	cl = mei_cl_alloc_linked(bus);
cl                418 drivers/misc/mei/bus-fixup.c 	if (IS_ERR(cl)) {
cl                419 drivers/misc/mei/bus-fixup.c 		ret = PTR_ERR(cl);
cl                420 drivers/misc/mei/bus-fixup.c 		cl = NULL;
cl                432 drivers/misc/mei/bus-fixup.c 	ret = mei_cl_connect(cl, me_cl, NULL);
cl                441 drivers/misc/mei/bus-fixup.c 	ret = mei_nfc_if_version(cl, &ver);
cl                459 drivers/misc/mei/bus-fixup.c 	if (mei_cl_disconnect(cl) < 0)
cl                462 drivers/misc/mei/bus-fixup.c 	mei_cl_flush_queues(cl, NULL);
cl                465 drivers/misc/mei/bus-fixup.c 	mei_cl_unlink(cl);
cl                468 drivers/misc/mei/bus-fixup.c 	kfree(cl);
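
The bus-fixup.c excerpt above is the complete life of a throwaway host client: mei_cl_alloc_linked() allocates and links it, mei_cl_connect() binds it to an ME client, __mei_cl_send()/__mei_cl_recv() carry the version query, and the unwind is disconnect, flush, unlink, kfree. Condensed from those lines (error handling elided; the me_cl lookup is not shown in this index):

	struct mei_cl *cl;
	int ret;

	cl = mei_cl_alloc_linked(bus);
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	ret = mei_cl_connect(cl, me_cl, NULL);
	if (ret)
		goto out;

	ret = mei_nfc_if_version(cl, &ver);	/* __mei_cl_send() + __mei_cl_recv() */

	if (mei_cl_disconnect(cl) < 0)
		dev_err(bus->dev, "Can't disconnect\n");	/* best effort */
out:
	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	kfree(cl);
	return ret;
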
cl                 33 drivers/misc/mei/bus.c ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
cl                 40 drivers/misc/mei/bus.c 	if (WARN_ON(!cl || !cl->dev))
cl                 43 drivers/misc/mei/bus.c 	bus = cl->dev;
cl                 51 drivers/misc/mei/bus.c 	if (!mei_cl_is_connected(cl)) {
cl                 57 drivers/misc/mei/bus.c 	if (!mei_me_cl_is_active(cl->me_cl)) {
cl                 62 drivers/misc/mei/bus.c 	if (length > mei_cl_mtu(cl)) {
cl                 67 drivers/misc/mei/bus.c 	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
cl                 69 drivers/misc/mei/bus.c 		rets = wait_event_interruptible(cl->tx_wait,
cl                 70 drivers/misc/mei/bus.c 				cl->writing_state == MEI_WRITE_COMPLETE ||
cl                 71 drivers/misc/mei/bus.c 				(!mei_cl_is_connected(cl)));
cl                 78 drivers/misc/mei/bus.c 		if (!mei_cl_is_connected(cl)) {
cl                 84 drivers/misc/mei/bus.c 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
cl                 94 drivers/misc/mei/bus.c 	rets = mei_cl_write(cl, cb);
cl                113 drivers/misc/mei/bus.c ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
cl                122 drivers/misc/mei/bus.c 	if (WARN_ON(!cl || !cl->dev))
cl                125 drivers/misc/mei/bus.c 	bus = cl->dev;
cl                133 drivers/misc/mei/bus.c 	cb = mei_cl_read_cb(cl, NULL);
cl                137 drivers/misc/mei/bus.c 	rets = mei_cl_read_start(cl, length, NULL);
cl                148 drivers/misc/mei/bus.c 	if (!waitqueue_active(&cl->rx_wait)) {
cl                154 drivers/misc/mei/bus.c 					(cl->rx_wait,
cl                155 drivers/misc/mei/bus.c 					(!list_empty(&cl->rd_completed)) ||
cl                156 drivers/misc/mei/bus.c 					(!mei_cl_is_connected(cl)),
cl                167 drivers/misc/mei/bus.c 					(cl->rx_wait,
cl                168 drivers/misc/mei/bus.c 					(!list_empty(&cl->rd_completed)) ||
cl                169 drivers/misc/mei/bus.c 					(!mei_cl_is_connected(cl)))) {
cl                178 drivers/misc/mei/bus.c 		if (!mei_cl_is_connected(cl)) {
cl                184 drivers/misc/mei/bus.c 	cb = mei_cl_read_cb(cl, NULL);
cl                219 drivers/misc/mei/bus.c 	struct mei_cl *cl = cldev->cl;
cl                221 drivers/misc/mei/bus.c 	return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
cl                238 drivers/misc/mei/bus.c 	struct mei_cl *cl = cldev->cl;
cl                240 drivers/misc/mei/bus.c 	return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0);
cl                255 drivers/misc/mei/bus.c 	struct mei_cl *cl = cldev->cl;
cl                257 drivers/misc/mei/bus.c 	return __mei_cl_recv(cl, buf, length, 0, 0);
cl                279 drivers/misc/mei/bus.c 	mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
cl                306 drivers/misc/mei/bus.c bool mei_cl_bus_notify_event(struct mei_cl *cl)
cl                308 drivers/misc/mei/bus.c 	struct mei_cl_device *cldev = cl->cldev;
cl                313 drivers/misc/mei/bus.c 	if (!cl->notify_ev)
cl                318 drivers/misc/mei/bus.c 	cl->notify_ev = false;
cl                331 drivers/misc/mei/bus.c bool mei_cl_bus_rx_event(struct mei_cl *cl)
cl                333 drivers/misc/mei/bus.c 	struct mei_cl_device *cldev = cl->cldev;
cl                367 drivers/misc/mei/bus.c 	ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
cl                402 drivers/misc/mei/bus.c 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
cl                471 drivers/misc/mei/bus.c 	return mei_cl_is_connected(cldev->cl);
cl                509 drivers/misc/mei/bus.c 	struct mei_cl *cl;
cl                512 drivers/misc/mei/bus.c 	cl = cldev->cl;
cl                515 drivers/misc/mei/bus.c 	if (cl->state == MEI_FILE_UNINITIALIZED) {
cl                516 drivers/misc/mei/bus.c 		ret = mei_cl_link(cl);
cl                520 drivers/misc/mei/bus.c 		cl->cldev = cldev;
cl                523 drivers/misc/mei/bus.c 	if (mei_cl_is_connected(cl)) {
cl                534 drivers/misc/mei/bus.c 	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
cl                575 drivers/misc/mei/bus.c 	struct mei_cl *cl;
cl                581 drivers/misc/mei/bus.c 	cl = cldev->cl;
cl                589 drivers/misc/mei/bus.c 	if (!mei_cl_is_connected(cl)) {
cl                595 drivers/misc/mei/bus.c 	err = mei_cl_disconnect(cl);
cl                601 drivers/misc/mei/bus.c 	mei_cl_flush_queues(cl, NULL);
cl                602 drivers/misc/mei/bus.c 	mei_cl_unlink(cl);
cl                865 drivers/misc/mei/bus.c 	mei_cl_unlink(cldev->cl);
cl                866 drivers/misc/mei/bus.c 	kfree(cldev->cl);
cl                900 drivers/misc/mei/bus.c 	struct mei_cl *cl;
cl                906 drivers/misc/mei/bus.c 	cl = mei_cl_allocate(bus);
cl                907 drivers/misc/mei/bus.c 	if (!cl) {
cl                918 drivers/misc/mei/bus.c 	cldev->cl         = cl;
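
The bus.c wrappers above just pull cldev->cl and delegate to __mei_cl_send()/__mei_cl_recv() with the appropriate mode flags (blocking TX, plain or non-blocking RX). From a mei bus driver's point of view the usage is simply the following; the one-byte command payload is hypothetical:

	u8 cmd[] = { 0x01 };			/* hypothetical payload */
	u8 reply[16];
	ssize_t n;

	n = mei_cldev_send(cldev, cmd, sizeof(cmd));	/* MEI_CL_IO_TX_BLOCKING */
	if (n < 0)
		return n;

	n = mei_cldev_recv(cldev, reply, sizeof(reply));	/* blocking read */
	if (n < 0)
		return n;
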
cl                339 drivers/misc/mei/client.c 	cb->cl->tx_cb_queued++;
cl                351 drivers/misc/mei/client.c 	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
cl                352 drivers/misc/mei/client.c 		cb->cl->tx_cb_queued--;
cl                366 drivers/misc/mei/client.c static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
cl                378 drivers/misc/mei/client.c 	cb->cl = cl;
cl                391 drivers/misc/mei/client.c 				 const struct mei_cl *cl)
cl                396 drivers/misc/mei/client.c 		if (cl == cb->cl) {
cl                411 drivers/misc/mei/client.c 				   const struct mei_cl *cl)
cl                416 drivers/misc/mei/client.c 		if (cl == cb->cl)
cl                446 drivers/misc/mei/client.c struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
cl                452 drivers/misc/mei/client.c 	cb = mei_io_cb_init(cl, fop_type, fp);
cl                481 drivers/misc/mei/client.c struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
cl                489 drivers/misc/mei/client.c 		length = max_t(size_t, length, mei_cl_mtu(cl));
cl                491 drivers/misc/mei/client.c 	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
cl                495 drivers/misc/mei/client.c 	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
cl                508 drivers/misc/mei/client.c struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
cl                512 drivers/misc/mei/client.c 	list_for_each_entry(cb, &cl->rd_completed, list)
cl                527 drivers/misc/mei/client.c int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
cl                531 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                534 drivers/misc/mei/client.c 	dev = cl->dev;
cl                536 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
cl                537 drivers/misc/mei/client.c 	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
cl                538 drivers/misc/mei/client.c 	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
cl                539 drivers/misc/mei/client.c 	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
cl                540 drivers/misc/mei/client.c 	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
cl                541 drivers/misc/mei/client.c 	mei_io_list_free_fp(&cl->rd_pending, fp);
cl                542 drivers/misc/mei/client.c 	mei_io_list_free_fp(&cl->rd_completed, fp);
cl                553 drivers/misc/mei/client.c static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
cl                555 drivers/misc/mei/client.c 	memset(cl, 0, sizeof(struct mei_cl));
cl                556 drivers/misc/mei/client.c 	init_waitqueue_head(&cl->wait);
cl                557 drivers/misc/mei/client.c 	init_waitqueue_head(&cl->rx_wait);
cl                558 drivers/misc/mei/client.c 	init_waitqueue_head(&cl->tx_wait);
cl                559 drivers/misc/mei/client.c 	init_waitqueue_head(&cl->ev_wait);
cl                560 drivers/misc/mei/client.c 	INIT_LIST_HEAD(&cl->rd_completed);
cl                561 drivers/misc/mei/client.c 	INIT_LIST_HEAD(&cl->rd_pending);
cl                562 drivers/misc/mei/client.c 	INIT_LIST_HEAD(&cl->link);
cl                563 drivers/misc/mei/client.c 	cl->writing_state = MEI_IDLE;
cl                564 drivers/misc/mei/client.c 	cl->state = MEI_FILE_UNINITIALIZED;
cl                565 drivers/misc/mei/client.c 	cl->dev = dev;
cl                576 drivers/misc/mei/client.c 	struct mei_cl *cl;
cl                578 drivers/misc/mei/client.c 	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
cl                579 drivers/misc/mei/client.c 	if (!cl)
cl                582 drivers/misc/mei/client.c 	mei_cl_init(cl, dev);
cl                584 drivers/misc/mei/client.c 	return cl;
cl                596 drivers/misc/mei/client.c int mei_cl_link(struct mei_cl *cl)
cl                601 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                604 drivers/misc/mei/client.c 	dev = cl->dev;
cl                620 drivers/misc/mei/client.c 	cl->host_client_id = id;
cl                621 drivers/misc/mei/client.c 	list_add_tail(&cl->link, &dev->file_list);
cl                625 drivers/misc/mei/client.c 	cl->state = MEI_FILE_INITIALIZING;
cl                627 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "link cl\n");
cl                638 drivers/misc/mei/client.c int mei_cl_unlink(struct mei_cl *cl)
cl                643 drivers/misc/mei/client.c 	if (!cl)
cl                646 drivers/misc/mei/client.c 	if (WARN_ON(!cl->dev))
cl                649 drivers/misc/mei/client.c 	dev = cl->dev;
cl                651 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "unlink client");
cl                657 drivers/misc/mei/client.c 	if (cl->host_client_id)
cl                658 drivers/misc/mei/client.c 		clear_bit(cl->host_client_id, dev->host_clients_map);
cl                660 drivers/misc/mei/client.c 	list_del_init(&cl->link);
cl                662 drivers/misc/mei/client.c 	cl->state = MEI_FILE_UNINITIALIZED;
cl                663 drivers/misc/mei/client.c 	cl->writing_state = MEI_IDLE;
cl                665 drivers/misc/mei/client.c 	WARN_ON(!list_empty(&cl->rd_completed) ||
cl                666 drivers/misc/mei/client.c 		!list_empty(&cl->rd_pending) ||
cl                667 drivers/misc/mei/client.c 		!list_empty(&cl->link));
cl                714 drivers/misc/mei/client.c static void mei_cl_wake_all(struct mei_cl *cl)
cl                716 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl                719 drivers/misc/mei/client.c 	if (waitqueue_active(&cl->rx_wait)) {
cl                720 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Waking up reading client!\n");
cl                721 drivers/misc/mei/client.c 		wake_up_interruptible(&cl->rx_wait);
cl                724 drivers/misc/mei/client.c 	if (waitqueue_active(&cl->tx_wait)) {
cl                725 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Waking up writing client!\n");
cl                726 drivers/misc/mei/client.c 		wake_up_interruptible(&cl->tx_wait);
cl                729 drivers/misc/mei/client.c 	if (waitqueue_active(&cl->ev_wait)) {
cl                730 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
cl                731 drivers/misc/mei/client.c 		wake_up_interruptible(&cl->ev_wait);
cl                734 drivers/misc/mei/client.c 	if (waitqueue_active(&cl->wait)) {
cl                735 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
cl                736 drivers/misc/mei/client.c 		wake_up(&cl->wait);
cl                746 drivers/misc/mei/client.c static void mei_cl_set_disconnected(struct mei_cl *cl)
cl                748 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl                750 drivers/misc/mei/client.c 	if (cl->state == MEI_FILE_DISCONNECTED ||
cl                751 drivers/misc/mei/client.c 	    cl->state <= MEI_FILE_INITIALIZING)
cl                754 drivers/misc/mei/client.c 	cl->state = MEI_FILE_DISCONNECTED;
cl                755 drivers/misc/mei/client.c 	mei_io_tx_list_free_cl(&dev->write_list, cl);
cl                756 drivers/misc/mei/client.c 	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
cl                757 drivers/misc/mei/client.c 	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
cl                758 drivers/misc/mei/client.c 	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
cl                759 drivers/misc/mei/client.c 	mei_cl_wake_all(cl);
cl                760 drivers/misc/mei/client.c 	cl->rx_flow_ctrl_creds = 0;
cl                761 drivers/misc/mei/client.c 	cl->tx_flow_ctrl_creds = 0;
cl                762 drivers/misc/mei/client.c 	cl->timer_count = 0;
cl                764 drivers/misc/mei/client.c 	if (!cl->me_cl)
cl                767 drivers/misc/mei/client.c 	if (!WARN_ON(cl->me_cl->connect_count == 0))
cl                768 drivers/misc/mei/client.c 		cl->me_cl->connect_count--;
cl                770 drivers/misc/mei/client.c 	if (cl->me_cl->connect_count == 0)
cl                771 drivers/misc/mei/client.c 		cl->me_cl->tx_flow_ctrl_creds = 0;
cl                773 drivers/misc/mei/client.c 	mei_me_cl_put(cl->me_cl);
cl                774 drivers/misc/mei/client.c 	cl->me_cl = NULL;
cl                777 drivers/misc/mei/client.c static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
cl                790 drivers/misc/mei/client.c 	cl->me_cl = me_cl;
cl                791 drivers/misc/mei/client.c 	cl->state = MEI_FILE_CONNECTING;
cl                792 drivers/misc/mei/client.c 	cl->me_cl->connect_count++;
cl                805 drivers/misc/mei/client.c static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
cl                810 drivers/misc/mei/client.c 	dev = cl->dev;
cl                812 drivers/misc/mei/client.c 	ret = mei_hbm_cl_disconnect_req(dev, cl);
cl                813 drivers/misc/mei/client.c 	cl->status = ret;
cl                815 drivers/misc/mei/client.c 		cl->state = MEI_FILE_DISCONNECT_REPLY;
cl                820 drivers/misc/mei/client.c 	cl->timer_count = MEI_CONNECT_TIMEOUT;
cl                836 drivers/misc/mei/client.c int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                839 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl                852 drivers/misc/mei/client.c 	ret = mei_cl_send_disconnect(cl, cb);
cl                867 drivers/misc/mei/client.c static int __mei_cl_disconnect(struct mei_cl *cl)
cl                873 drivers/misc/mei/client.c 	dev = cl->dev;
cl                875 drivers/misc/mei/client.c 	cl->state = MEI_FILE_DISCONNECTING;
cl                877 drivers/misc/mei/client.c 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
cl                884 drivers/misc/mei/client.c 		rets = mei_cl_send_disconnect(cl, cb);
cl                886 drivers/misc/mei/client.c 			cl_err(dev, cl, "failed to disconnect.\n");
cl                892 drivers/misc/mei/client.c 	wait_event_timeout(cl->wait,
cl                893 drivers/misc/mei/client.c 			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl                894 drivers/misc/mei/client.c 			   cl->state == MEI_FILE_DISCONNECTED,
cl                898 drivers/misc/mei/client.c 	rets = cl->status;
cl                899 drivers/misc/mei/client.c 	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
cl                900 drivers/misc/mei/client.c 	    cl->state != MEI_FILE_DISCONNECTED) {
cl                901 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
cl                907 drivers/misc/mei/client.c 	mei_cl_set_disconnected(cl);
cl                909 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
cl                924 drivers/misc/mei/client.c int mei_cl_disconnect(struct mei_cl *cl)
cl                929 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl                932 drivers/misc/mei/client.c 	dev = cl->dev;
cl                934 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "disconnecting");
cl                936 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl))
cl                939 drivers/misc/mei/client.c 	if (mei_cl_is_fixed_address(cl)) {
cl                940 drivers/misc/mei/client.c 		mei_cl_set_disconnected(cl);
cl                945 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
cl                946 drivers/misc/mei/client.c 		mei_cl_set_disconnected(cl);
cl                953 drivers/misc/mei/client.c 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
cl                957 drivers/misc/mei/client.c 	rets = __mei_cl_disconnect(cl);
cl                959 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "rpm: autosuspend\n");
cl                975 drivers/misc/mei/client.c static bool mei_cl_is_other_connecting(struct mei_cl *cl)
cl                980 drivers/misc/mei/client.c 	dev = cl->dev;
cl                984 drivers/misc/mei/client.c 		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
cl                999 drivers/misc/mei/client.c static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
cl               1004 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1006 drivers/misc/mei/client.c 	ret = mei_hbm_cl_connect_req(dev, cl);
cl               1007 drivers/misc/mei/client.c 	cl->status = ret;
cl               1009 drivers/misc/mei/client.c 		cl->state = MEI_FILE_DISCONNECT_REPLY;
cl               1014 drivers/misc/mei/client.c 	cl->timer_count = MEI_CONNECT_TIMEOUT;
cl               1028 drivers/misc/mei/client.c int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
cl               1031 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl               1036 drivers/misc/mei/client.c 	if (mei_cl_is_other_connecting(cl))
cl               1047 drivers/misc/mei/client.c 	rets = mei_cl_send_connect(cl, cb);
cl               1065 drivers/misc/mei/client.c int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
cl               1072 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev || !me_cl))
cl               1075 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1077 drivers/misc/mei/client.c 	rets = mei_cl_set_connecting(cl, me_cl);
cl               1081 drivers/misc/mei/client.c 	if (mei_cl_is_fixed_address(cl)) {
cl               1082 drivers/misc/mei/client.c 		cl->state = MEI_FILE_CONNECTED;
cl               1090 drivers/misc/mei/client.c 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
cl               1094 drivers/misc/mei/client.c 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
cl               1101 drivers/misc/mei/client.c 	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
cl               1102 drivers/misc/mei/client.c 		rets = mei_cl_send_connect(cl, cb);
cl               1108 drivers/misc/mei/client.c 	wait_event_timeout(cl->wait,
cl               1109 drivers/misc/mei/client.c 			(cl->state == MEI_FILE_CONNECTED ||
cl               1110 drivers/misc/mei/client.c 			 cl->state == MEI_FILE_DISCONNECTED ||
cl               1111 drivers/misc/mei/client.c 			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl               1112 drivers/misc/mei/client.c 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
cl               1116 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl)) {
cl               1117 drivers/misc/mei/client.c 		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
cl               1118 drivers/misc/mei/client.c 			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
cl               1119 drivers/misc/mei/client.c 			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
cl               1123 drivers/misc/mei/client.c 			__mei_cl_disconnect(cl);
cl               1129 drivers/misc/mei/client.c 		if (!cl->status)
cl               1130 drivers/misc/mei/client.c 			cl->status = -EFAULT;
cl               1133 drivers/misc/mei/client.c 	rets = cl->status;
cl               1135 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "rpm: autosuspend\n");
cl               1142 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl))
cl               1143 drivers/misc/mei/client.c 		mei_cl_set_disconnected(cl);
cl               1157 drivers/misc/mei/client.c 	struct mei_cl *cl;
cl               1160 drivers/misc/mei/client.c 	cl = mei_cl_allocate(dev);
cl               1161 drivers/misc/mei/client.c 	if (!cl) {
cl               1166 drivers/misc/mei/client.c 	ret = mei_cl_link(cl);
cl               1170 drivers/misc/mei/client.c 	return cl;
cl               1172 drivers/misc/mei/client.c 	kfree(cl);
cl               1183 drivers/misc/mei/client.c static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
cl               1185 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->me_cl))
cl               1188 drivers/misc/mei/client.c 	if (cl->tx_flow_ctrl_creds > 0)
cl               1191 drivers/misc/mei/client.c 	if (mei_cl_is_fixed_address(cl))
cl               1194 drivers/misc/mei/client.c 	if (mei_cl_is_single_recv_buf(cl)) {
cl               1195 drivers/misc/mei/client.c 		if (cl->me_cl->tx_flow_ctrl_creds > 0)
cl               1211 drivers/misc/mei/client.c static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
cl               1213 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->me_cl))
cl               1216 drivers/misc/mei/client.c 	if (mei_cl_is_fixed_address(cl))
cl               1219 drivers/misc/mei/client.c 	if (mei_cl_is_single_recv_buf(cl)) {
cl               1220 drivers/misc/mei/client.c 		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
cl               1222 drivers/misc/mei/client.c 		cl->me_cl->tx_flow_ctrl_creds--;
cl               1224 drivers/misc/mei/client.c 		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
cl               1226 drivers/misc/mei/client.c 		cl->tx_flow_ctrl_creds--;
cl               1270 drivers/misc/mei/client.c int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
cl               1273 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl               1288 drivers/misc/mei/client.c 	ret = mei_hbm_cl_notify_req(dev, cl, request);
cl               1290 drivers/misc/mei/client.c 		cl->status = ret;
cl               1310 drivers/misc/mei/client.c int mei_cl_notify_request(struct mei_cl *cl,
cl               1318 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl               1321 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1324 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "notifications not supported\n");
cl               1328 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl))
cl               1334 drivers/misc/mei/client.c 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
cl               1339 drivers/misc/mei/client.c 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
cl               1346 drivers/misc/mei/client.c 		if (mei_hbm_cl_notify_req(dev, cl, request)) {
cl               1354 drivers/misc/mei/client.c 	wait_event_timeout(cl->wait,
cl               1355 drivers/misc/mei/client.c 			   cl->notify_en == request ||
cl               1356 drivers/misc/mei/client.c 			   cl->status ||
cl               1357 drivers/misc/mei/client.c 			   !mei_cl_is_connected(cl),
cl               1361 drivers/misc/mei/client.c 	if (cl->notify_en != request && !cl->status)
cl               1362 drivers/misc/mei/client.c 		cl->status = -EFAULT;
cl               1364 drivers/misc/mei/client.c 	rets = cl->status;
cl               1367 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "rpm: autosuspend\n");
cl               1382 drivers/misc/mei/client.c void mei_cl_notify(struct mei_cl *cl)
cl               1386 drivers/misc/mei/client.c 	if (!cl || !cl->dev)
cl               1389 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1391 drivers/misc/mei/client.c 	if (!cl->notify_en)
cl               1394 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "notify event");
cl               1395 drivers/misc/mei/client.c 	cl->notify_ev = true;
cl               1396 drivers/misc/mei/client.c 	if (!mei_cl_bus_notify_event(cl))
cl               1397 drivers/misc/mei/client.c 		wake_up_interruptible(&cl->ev_wait);
cl               1399 drivers/misc/mei/client.c 	if (cl->ev_async)
cl               1400 drivers/misc/mei/client.c 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
cl               1415 drivers/misc/mei/client.c int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
cl               1422 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl               1425 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1428 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "notifications not supported\n");
cl               1432 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl))
cl               1435 drivers/misc/mei/client.c 	if (cl->notify_ev)
cl               1442 drivers/misc/mei/client.c 	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
cl               1449 drivers/misc/mei/client.c 	*notify_ev = cl->notify_ev;
cl               1450 drivers/misc/mei/client.c 	cl->notify_ev = false;
cl               1463 drivers/misc/mei/client.c int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
cl               1469 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl               1472 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1474 drivers/misc/mei/client.c 	if (!mei_cl_is_connected(cl))
cl               1477 drivers/misc/mei/client.c 	if (!mei_me_cl_is_active(cl->me_cl)) {
cl               1478 drivers/misc/mei/client.c 		cl_err(dev, cl, "no such me client\n");
cl               1482 drivers/misc/mei/client.c 	if (mei_cl_is_fixed_address(cl))
cl               1486 drivers/misc/mei/client.c 	if (cl->rx_flow_ctrl_creds)
cl               1489 drivers/misc/mei/client.c 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
cl               1496 drivers/misc/mei/client.c 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
cl               1502 drivers/misc/mei/client.c 		rets = mei_hbm_cl_flow_control_req(dev, cl);
cl               1506 drivers/misc/mei/client.c 		list_move_tail(&cb->list, &cl->rd_pending);
cl               1508 drivers/misc/mei/client.c 	cl->rx_flow_ctrl_creds++;
cl               1511 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "rpm: autosuspend\n");
cl               1529 drivers/misc/mei/client.c 	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
cl               1530 drivers/misc/mei/client.c 	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
cl               1548 drivers/misc/mei/client.c int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cl               1564 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl               1567 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1573 drivers/misc/mei/client.c 	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
cl               1578 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
cl               1627 drivers/misc/mei/client.c 	cl->status = 0;
cl               1628 drivers/misc/mei/client.c 	cl->writing_state = MEI_WRITING;
cl               1632 drivers/misc/mei/client.c 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
cl               1644 drivers/misc/mei/client.c 	cl->status = rets;
cl               1658 drivers/misc/mei/client.c ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
cl               1672 drivers/misc/mei/client.c 	if (WARN_ON(!cl || !cl->dev))
cl               1678 drivers/misc/mei/client.c 	dev = cl->dev;
cl               1683 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "len=%zd\n", len);
cl               1691 drivers/misc/mei/client.c 		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
cl               1696 drivers/misc/mei/client.c 	cl->writing_state = MEI_IDLE;
cl               1699 drivers/misc/mei/client.c 	rets = mei_cl_tx_flow_ctrl_creds(cl);
cl               1706 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
cl               1712 drivers/misc/mei/client.c 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
cl               1753 drivers/misc/mei/client.c 	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
cl               1757 drivers/misc/mei/client.c 	cl->writing_state = MEI_WRITING;
cl               1769 drivers/misc/mei/client.c 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
cl               1772 drivers/misc/mei/client.c 		rets = wait_event_interruptible(cl->tx_wait,
cl               1773 drivers/misc/mei/client.c 				cl->writing_state == MEI_WRITE_COMPLETE ||
cl               1774 drivers/misc/mei/client.c 				(!mei_cl_is_connected(cl)));
cl               1782 drivers/misc/mei/client.c 		if (cl->writing_state != MEI_WRITE_COMPLETE) {
cl               1790 drivers/misc/mei/client.c 	cl_dbg(dev, cl, "rpm: autosuspend\n");
cl               1806 drivers/misc/mei/client.c void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
cl               1808 drivers/misc/mei/client.c 	struct mei_device *dev = cl->dev;
cl               1813 drivers/misc/mei/client.c 		cl->writing_state = MEI_WRITE_COMPLETE;
cl               1814 drivers/misc/mei/client.c 		if (waitqueue_active(&cl->tx_wait)) {
cl               1815 drivers/misc/mei/client.c 			wake_up_interruptible(&cl->tx_wait);
cl               1823 drivers/misc/mei/client.c 		list_add_tail(&cb->list, &cl->rd_completed);
cl               1824 drivers/misc/mei/client.c 		if (!mei_cl_is_fixed_address(cl) &&
cl               1825 drivers/misc/mei/client.c 		    !WARN_ON(!cl->rx_flow_ctrl_creds))
cl               1826 drivers/misc/mei/client.c 			cl->rx_flow_ctrl_creds--;
cl               1827 drivers/misc/mei/client.c 		if (!mei_cl_bus_rx_event(cl))
cl               1828 drivers/misc/mei/client.c 			wake_up_interruptible(&cl->rx_wait);
cl               1835 drivers/misc/mei/client.c 		if (waitqueue_active(&cl->wait))
cl               1836 drivers/misc/mei/client.c 			wake_up(&cl->wait);
cl               1841 drivers/misc/mei/client.c 		mei_cl_set_disconnected(cl);
cl               1856 drivers/misc/mei/client.c 	struct mei_cl *cl;
cl               1858 drivers/misc/mei/client.c 	list_for_each_entry(cl, &dev->file_list, link)
cl               1859 drivers/misc/mei/client.c 		mei_cl_set_disconnected(cl);
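
mei_cl_tx_flow_ctrl_creds() and mei_cl_tx_flow_ctrl_creds_reduce() in client.c above implement per-client TX credits: fixed-address clients are always writable, single-receive-buffer ME clients share one credit pool on cl->me_cl, and everyone else consumes cl->tx_flow_ctrl_creds granted by the firmware via MEI_FLOW_CONTROL_CMD (see the hbm.c hits below). Restated as a sketch; tx_creds is a hypothetical name for the check:

	/* 1 = may send now, 0 = must wait for a flow-control credit */
	static int tx_creds(struct mei_cl *cl)
	{
		if (cl->tx_flow_ctrl_creds > 0)
			return 1;
		if (mei_cl_is_fixed_address(cl))
			return 1;		/* no credit accounting at all */
		if (mei_cl_is_single_recv_buf(cl))
			return cl->me_cl->tx_flow_ctrl_creds > 0 ? 1 : 0;
		return 0;
	}
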
cl                 83 drivers/misc/mei/client.h int mei_cl_link(struct mei_cl *cl);
cl                 84 drivers/misc/mei/client.h int mei_cl_unlink(struct mei_cl *cl);
cl                 88 drivers/misc/mei/client.h struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
cl                 90 drivers/misc/mei/client.h struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
cl                 93 drivers/misc/mei/client.h struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
cl                 96 drivers/misc/mei/client.h int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
cl                109 drivers/misc/mei/client.h static inline bool mei_cl_is_connected(struct mei_cl *cl)
cl                111 drivers/misc/mei/client.h 	return  cl->state == MEI_FILE_CONNECTED;
cl                121 drivers/misc/mei/client.h static inline u8 mei_cl_me_id(const struct mei_cl *cl)
cl                123 drivers/misc/mei/client.h 	return cl->me_cl ? cl->me_cl->client_id : 0;
cl                133 drivers/misc/mei/client.h static inline size_t mei_cl_mtu(const struct mei_cl *cl)
cl                135 drivers/misc/mei/client.h 	return cl->me_cl->props.max_msg_length;
cl                145 drivers/misc/mei/client.h static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl)
cl                147 drivers/misc/mei/client.h 	return cl->me_cl && cl->me_cl->props.fixed_address;
cl                158 drivers/misc/mei/client.h static inline bool mei_cl_is_single_recv_buf(const struct mei_cl *cl)
cl                160 drivers/misc/mei/client.h 	return cl->me_cl->props.single_recv_buf;
cl                170 drivers/misc/mei/client.h static inline const uuid_le *mei_cl_uuid(const struct mei_cl *cl)
cl                172 drivers/misc/mei/client.h 	return mei_me_cl_uuid(cl->me_cl);
cl                182 drivers/misc/mei/client.h static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
cl                184 drivers/misc/mei/client.h 	return  mei_cl_is_fixed_address(cl) ? 0 : cl->host_client_id;
cl                187 drivers/misc/mei/client.h int mei_cl_disconnect(struct mei_cl *cl);
cl                188 drivers/misc/mei/client.h int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                190 drivers/misc/mei/client.h int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
cl                192 drivers/misc/mei/client.h int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                194 drivers/misc/mei/client.h int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
cl                195 drivers/misc/mei/client.h ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
cl                196 drivers/misc/mei/client.h int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                199 drivers/misc/mei/client.h void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
cl                205 drivers/misc/mei/client.h int mei_cl_notify_request(struct mei_cl *cl,
cl                207 drivers/misc/mei/client.h int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                209 drivers/misc/mei/client.h int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
cl                210 drivers/misc/mei/client.h void mei_cl_notify(struct mei_cl *cl);
cl                215 drivers/misc/mei/client.h #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
cl                217 drivers/misc/mei/client.h #define cl_dbg(dev, cl, format, arg...) \
cl                218 drivers/misc/mei/client.h 	dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
cl                220 drivers/misc/mei/client.h #define cl_warn(dev, cl, format, arg...) \
cl                221 drivers/misc/mei/client.h 	dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
cl                223 drivers/misc/mei/client.h #define cl_err(dev, cl, format, arg...) \
cl                224 drivers/misc/mei/client.h 	dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
cl                 60 drivers/misc/mei/debugfs.c 	struct mei_cl *cl;
cl                 74 drivers/misc/mei/debugfs.c 	list_for_each_entry(cl, &dev->file_list, link) {
cl                 77 drivers/misc/mei/debugfs.c 			   i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
cl                 78 drivers/misc/mei/debugfs.c 			   !list_empty(&cl->rd_completed), cl->writing_state,
cl                 79 drivers/misc/mei/debugfs.c 			   cl->tx_cb_queued);
cl                152 drivers/misc/mei/hbm.c void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
cl                159 drivers/misc/mei/hbm.c 	cmd->host_addr = mei_cl_host_addr(cl);
cl                160 drivers/misc/mei/hbm.c 	cmd->me_addr = mei_cl_me_id(cl);
cl                174 drivers/misc/mei/hbm.c static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
cl                180 drivers/misc/mei/hbm.c 	mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
cl                195 drivers/misc/mei/hbm.c bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)
cl                197 drivers/misc/mei/hbm.c 	return  mei_cl_host_addr(cl) == cmd->host_addr &&
cl                198 drivers/misc/mei/hbm.c 		mei_cl_me_id(cl) == cmd->me_addr;
cl                213 drivers/misc/mei/hbm.c 	struct mei_cl *cl;
cl                215 drivers/misc/mei/hbm.c 	list_for_each_entry(cl, &dev->file_list, link)
cl                216 drivers/misc/mei/hbm.c 		if (mei_hbm_cl_addr_equal(cl, cmd))
cl                217 drivers/misc/mei/hbm.c 			return cl;
cl                467 drivers/misc/mei/hbm.c 			  struct mei_cl *cl, u8 start)
cl                476 drivers/misc/mei/hbm.c 	mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
cl                512 drivers/misc/mei/hbm.c 					struct mei_cl *cl,
cl                518 drivers/misc/mei/hbm.c 	cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
cl                522 drivers/misc/mei/hbm.c 		cl->notify_en = true;
cl                523 drivers/misc/mei/hbm.c 		cl->status = 0;
cl                525 drivers/misc/mei/hbm.c 		cl->status = -EINVAL;
cl                538 drivers/misc/mei/hbm.c 				       struct mei_cl *cl,
cl                544 drivers/misc/mei/hbm.c 	cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
cl                548 drivers/misc/mei/hbm.c 		cl->notify_en = false;
cl                549 drivers/misc/mei/hbm.c 		cl->status = 0;
cl                552 drivers/misc/mei/hbm.c 		cl->status = -EINVAL;
cl                565 drivers/misc/mei/hbm.c 	struct mei_cl *cl;
cl                567 drivers/misc/mei/hbm.c 	cl = mei_hbm_cl_find_by_cmd(dev, cmd);
cl                568 drivers/misc/mei/hbm.c 	if (cl)
cl                569 drivers/misc/mei/hbm.c 		mei_cl_notify(cl);
cl                679 drivers/misc/mei/hbm.c int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
cl                683 drivers/misc/mei/hbm.c 	cl_dbg(dev, cl, "sending flow control\n");
cl                684 drivers/misc/mei/hbm.c 	return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD,
cl                732 drivers/misc/mei/hbm.c 	struct mei_cl *cl;
cl                740 drivers/misc/mei/hbm.c 	cl = mei_hbm_cl_find_by_cmd(dev, fctrl);
cl                741 drivers/misc/mei/hbm.c 	if (cl) {
cl                742 drivers/misc/mei/hbm.c 		cl->tx_flow_ctrl_creds++;
cl                743 drivers/misc/mei/hbm.c 		cl_dbg(dev, cl, "flow control creds = %d.\n",
cl                744 drivers/misc/mei/hbm.c 				cl->tx_flow_ctrl_creds);
cl                757 drivers/misc/mei/hbm.c int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
cl                761 drivers/misc/mei/hbm.c 	return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD,
cl                773 drivers/misc/mei/hbm.c int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
cl                777 drivers/misc/mei/hbm.c 	return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD,
cl                789 drivers/misc/mei/hbm.c static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
cl                795 drivers/misc/mei/hbm.c 	cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
cl                798 drivers/misc/mei/hbm.c 		cl->state = MEI_FILE_DISCONNECT_REPLY;
cl                799 drivers/misc/mei/hbm.c 	cl->status = 0;
cl                810 drivers/misc/mei/hbm.c int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
cl                814 drivers/misc/mei/hbm.c 	return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD,
cl                826 drivers/misc/mei/hbm.c static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
cl                832 drivers/misc/mei/hbm.c 	cl_dbg(dev, cl, "hbm: connect response status=%s\n",
cl                836 drivers/misc/mei/hbm.c 		cl->state = MEI_FILE_CONNECTED;
cl                838 drivers/misc/mei/hbm.c 		cl->state = MEI_FILE_DISCONNECT_REPLY;
cl                840 drivers/misc/mei/hbm.c 			mei_me_cl_del(dev, cl->me_cl);
cl                845 drivers/misc/mei/hbm.c 	cl->status = mei_cl_conn_status_to_errno(rs->status);
cl                860 drivers/misc/mei/hbm.c 	struct mei_cl *cl;
cl                863 drivers/misc/mei/hbm.c 	cl = NULL;
cl                866 drivers/misc/mei/hbm.c 		cl = cb->cl;
cl                871 drivers/misc/mei/hbm.c 		if (mei_hbm_cl_addr_equal(cl, rs)) {
cl                877 drivers/misc/mei/hbm.c 	if (!cl)
cl                882 drivers/misc/mei/hbm.c 		mei_hbm_cl_connect_res(dev, cl, rs);
cl                885 drivers/misc/mei/hbm.c 		mei_hbm_cl_disconnect_res(dev, cl, rs);
cl                888 drivers/misc/mei/hbm.c 		mei_hbm_cl_notify_start_res(dev, cl, rs);
cl                891 drivers/misc/mei/hbm.c 		mei_hbm_cl_notify_stop_res(dev, cl, rs);
cl                897 drivers/misc/mei/hbm.c 	cl->timer_count = 0;
cl                898 drivers/misc/mei/hbm.c 	wake_up(&cl->wait);
cl                914 drivers/misc/mei/hbm.c 	struct mei_cl *cl;
cl                917 drivers/misc/mei/hbm.c 	cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
cl                918 drivers/misc/mei/hbm.c 	if (cl) {
cl                919 drivers/misc/mei/hbm.c 		cl_warn(dev, cl, "fw disconnect request received\n");
cl                920 drivers/misc/mei/hbm.c 		cl->state = MEI_FILE_DISCONNECTING;
cl                921 drivers/misc/mei/hbm.c 		cl->timer_count = 0;
cl                923 drivers/misc/mei/hbm.c 		cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
cl                 43 drivers/misc/mei/hbm.h int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
cl                 44 drivers/misc/mei/hbm.h int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
cl                 45 drivers/misc/mei/hbm.h int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl);
cl                 46 drivers/misc/mei/hbm.h int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
cl                 51 drivers/misc/mei/hbm.h 			  struct mei_cl *cl, u8 request);
cl                 32 drivers/misc/mei/interrupt.c 	struct mei_cl *cl;
cl                 35 drivers/misc/mei/interrupt.c 		cl = cb->cl;
cl                 39 drivers/misc/mei/interrupt.c 		mei_cl_complete(cl, cb);
cl                 52 drivers/misc/mei/interrupt.c static inline int mei_cl_hbm_equal(struct mei_cl *cl,
cl                 55 drivers/misc/mei/interrupt.c 	return  mei_cl_host_addr(cl) == mei_hdr->host_addr &&
cl                 56 drivers/misc/mei/interrupt.c 		mei_cl_me_id(cl) == mei_hdr->me_addr;
cl                 87 drivers/misc/mei/interrupt.c static int mei_cl_irq_read_msg(struct mei_cl *cl,
cl                 91 drivers/misc/mei/interrupt.c 	struct mei_device *dev = cl->dev;
cl                 96 drivers/misc/mei/interrupt.c 	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
cl                 98 drivers/misc/mei/interrupt.c 		if (!mei_cl_is_fixed_address(cl)) {
cl                 99 drivers/misc/mei/interrupt.c 			cl_err(dev, cl, "pending read cb not found\n");
cl                102 drivers/misc/mei/interrupt.c 		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
cl                105 drivers/misc/mei/interrupt.c 		list_add_tail(&cb->list, &cl->rd_pending);
cl                108 drivers/misc/mei/interrupt.c 	if (!mei_cl_is_connected(cl)) {
cl                109 drivers/misc/mei/interrupt.c 		cl_dbg(dev, cl, "not connected\n");
cl                119 drivers/misc/mei/interrupt.c 		cl_err(dev, cl, "message is too big len %d idx %zu\n",
cl                126 drivers/misc/mei/interrupt.c 		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cl                141 drivers/misc/mei/interrupt.c 		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
cl                166 drivers/misc/mei/interrupt.c static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                169 drivers/misc/mei/interrupt.c 	struct mei_device *dev = cl->dev;
cl                182 drivers/misc/mei/interrupt.c 	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
cl                198 drivers/misc/mei/interrupt.c static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
cl                201 drivers/misc/mei/interrupt.c 	struct mei_device *dev = cl->dev;
cl                206 drivers/misc/mei/interrupt.c 	if (!list_empty(&cl->rd_pending))
cl                217 drivers/misc/mei/interrupt.c 	ret = mei_hbm_cl_flow_control_req(dev, cl);
cl                219 drivers/misc/mei/interrupt.c 		cl->status = ret;
cl                225 drivers/misc/mei/interrupt.c 	list_move_tail(&cb->list, &cl->rd_pending);
cl                268 drivers/misc/mei/interrupt.c 	struct mei_cl *cl;
cl                313 drivers/misc/mei/interrupt.c 	list_for_each_entry(cl, &dev->file_list, link) {
cl                314 drivers/misc/mei/interrupt.c 		if (mei_cl_hbm_equal(cl, mei_hdr)) {
cl                315 drivers/misc/mei/interrupt.c 			cl_dbg(dev, cl, "got a message\n");
cl                321 drivers/misc/mei/interrupt.c 	if (&cl->link == &dev->file_list) {
cl                339 drivers/misc/mei/interrupt.c 	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
cl                371 drivers/misc/mei/interrupt.c 	struct mei_cl *cl;
cl                391 drivers/misc/mei/interrupt.c 		cl = cb->cl;
cl                393 drivers/misc/mei/interrupt.c 		cl->status = 0;
cl                394 drivers/misc/mei/interrupt.c 		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl                395 drivers/misc/mei/interrupt.c 		cl->writing_state = MEI_WRITE_COMPLETE;
cl                402 drivers/misc/mei/interrupt.c 		cl = cb->cl;
cl                406 drivers/misc/mei/interrupt.c 			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
cl                413 drivers/misc/mei/interrupt.c 			ret = mei_cl_irq_read(cl, cb, cmpl_list);
cl                420 drivers/misc/mei/interrupt.c 			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
cl                427 drivers/misc/mei/interrupt.c 			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
cl                434 drivers/misc/mei/interrupt.c 			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
cl                446 drivers/misc/mei/interrupt.c 		cl = cb->cl;
cl                447 drivers/misc/mei/interrupt.c 		ret = mei_cl_irq_write(cl, cb, cmpl_list);
cl                461 drivers/misc/mei/interrupt.c static void mei_connect_timeout(struct mei_cl *cl)
cl                463 drivers/misc/mei/interrupt.c 	struct mei_device *dev = cl->dev;
cl                465 drivers/misc/mei/interrupt.c 	if (cl->state == MEI_FILE_CONNECTING) {
cl                467 drivers/misc/mei/interrupt.c 			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
cl                468 drivers/misc/mei/interrupt.c 			wake_up(&cl->wait);
cl                496 drivers/misc/mei/interrupt.c 	struct mei_cl *cl;
cl                522 drivers/misc/mei/interrupt.c 	list_for_each_entry(cl, &dev->file_list, link) {
cl                523 drivers/misc/mei/interrupt.c 		if (cl->timer_count) {
cl                524 drivers/misc/mei/interrupt.c 			if (--cl->timer_count == 0) {
cl                526 drivers/misc/mei/interrupt.c 				mei_connect_timeout(cl);
cl                 48 drivers/misc/mei/main.c 	struct mei_cl *cl;
cl                 65 drivers/misc/mei/main.c 	cl = mei_cl_alloc_linked(dev);
cl                 66 drivers/misc/mei/main.c 	if (IS_ERR(cl)) {
cl                 67 drivers/misc/mei/main.c 		err = PTR_ERR(cl);
cl                 71 drivers/misc/mei/main.c 	cl->fp = file;
cl                 72 drivers/misc/mei/main.c 	file->private_data = cl;
cl                 93 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                 97 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                100 drivers/misc/mei/main.c 	dev = cl->dev;
cl                104 drivers/misc/mei/main.c 	rets = mei_cl_disconnect(cl);
cl                106 drivers/misc/mei/main.c 	mei_cl_flush_queues(cl, file);
cl                107 drivers/misc/mei/main.c 	cl_dbg(dev, cl, "removing\n");
cl                109 drivers/misc/mei/main.c 	mei_cl_unlink(cl);
cl                113 drivers/misc/mei/main.c 	kfree(cl);
cl                133 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                139 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                142 drivers/misc/mei/main.c 	dev = cl->dev;
cl                161 drivers/misc/mei/main.c 	cb = mei_cl_read_cb(cl, file);
cl                168 drivers/misc/mei/main.c 	rets = mei_cl_read_start(cl, length, file);
cl                170 drivers/misc/mei/main.c 		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
cl                180 drivers/misc/mei/main.c 	if (wait_event_interruptible(cl->rx_wait,
cl                181 drivers/misc/mei/main.c 				     !list_empty(&cl->rd_completed) ||
cl                182 drivers/misc/mei/main.c 				     !mei_cl_is_connected(cl))) {
cl                189 drivers/misc/mei/main.c 	if (!mei_cl_is_connected(cl)) {
cl                194 drivers/misc/mei/main.c 	cb = mei_cl_read_cb(cl, file);
cl                204 drivers/misc/mei/main.c 		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
cl                208 drivers/misc/mei/main.c 	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
cl                236 drivers/misc/mei/main.c 	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
cl                253 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                258 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                261 drivers/misc/mei/main.c 	dev = cl->dev;
cl                270 drivers/misc/mei/main.c 	if (!mei_cl_is_connected(cl)) {
cl                271 drivers/misc/mei/main.c 		cl_err(dev, cl, "is not connected");
cl                276 drivers/misc/mei/main.c 	if (!mei_me_cl_is_active(cl->me_cl)) {
cl                281 drivers/misc/mei/main.c 	if (length > mei_cl_mtu(cl)) {
cl                291 drivers/misc/mei/main.c 	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
cl                297 drivers/misc/mei/main.c 		rets = wait_event_interruptible(cl->tx_wait,
cl                298 drivers/misc/mei/main.c 				cl->writing_state == MEI_WRITE_COMPLETE ||
cl                299 drivers/misc/mei/main.c 				(!mei_cl_is_connected(cl)));
cl                306 drivers/misc/mei/main.c 		if (!mei_cl_is_connected(cl)) {
cl                312 drivers/misc/mei/main.c 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
cl                326 drivers/misc/mei/main.c 	rets = mei_cl_write(cl, cb);
cl                348 drivers/misc/mei/main.c 	struct mei_cl *cl;
cl                351 drivers/misc/mei/main.c 	cl = file->private_data;
cl                352 drivers/misc/mei/main.c 	dev = cl->dev;
cl                357 drivers/misc/mei/main.c 	if (cl->state != MEI_FILE_INITIALIZING &&
cl                358 drivers/misc/mei/main.c 	    cl->state != MEI_FILE_DISCONNECTED)
cl                394 drivers/misc/mei/main.c 	rets = mei_cl_connect(cl, me_cl, file);
cl                412 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                418 drivers/misc/mei/main.c 	return mei_cl_notify_request(cl, file, (u8)request);
cl                431 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                436 drivers/misc/mei/main.c 	rets = mei_cl_notify_get(cl, block, &notify_ev);
cl                456 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                462 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                465 drivers/misc/mei/main.c 	dev = cl->dev;
cl                564 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                569 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                572 drivers/misc/mei/main.c 	dev = cl->dev;
cl                576 drivers/misc/mei/main.c 	notify_en = cl->notify_en && (req_events & EPOLLPRI);
cl                579 drivers/misc/mei/main.c 	    !mei_cl_is_connected(cl)) {
cl                585 drivers/misc/mei/main.c 		poll_wait(file, &cl->ev_wait, wait);
cl                586 drivers/misc/mei/main.c 		if (cl->notify_ev)
cl                591 drivers/misc/mei/main.c 		poll_wait(file, &cl->rx_wait, wait);
cl                593 drivers/misc/mei/main.c 		if (!list_empty(&cl->rd_completed))
cl                596 drivers/misc/mei/main.c 			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
cl                600 drivers/misc/mei/main.c 		poll_wait(file, &cl->tx_wait, wait);
cl                601 drivers/misc/mei/main.c 		if (cl->tx_cb_queued < dev->tx_queue_limit)
cl                617 drivers/misc/mei/main.c static bool mei_cl_is_write_queued(struct mei_cl *cl)
cl                619 drivers/misc/mei/main.c 	struct mei_device *dev = cl->dev;
cl                623 drivers/misc/mei/main.c 		if (cb->cl == cl)
cl                626 drivers/misc/mei/main.c 		if (cb->cl == cl)
cl                643 drivers/misc/mei/main.c 	struct mei_cl *cl = fp->private_data;
cl                647 drivers/misc/mei/main.c 	if (WARN_ON(!cl || !cl->dev))
cl                650 drivers/misc/mei/main.c 	dev = cl->dev;
cl                654 drivers/misc/mei/main.c 	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
cl                659 drivers/misc/mei/main.c 	while (mei_cl_is_write_queued(cl)) {
cl                661 drivers/misc/mei/main.c 		rets = wait_event_interruptible(cl->tx_wait,
cl                662 drivers/misc/mei/main.c 				cl->writing_state == MEI_WRITE_COMPLETE ||
cl                663 drivers/misc/mei/main.c 				!mei_cl_is_connected(cl));
cl                670 drivers/misc/mei/main.c 		if (!mei_cl_is_connected(cl)) {
cl                695 drivers/misc/mei/main.c 	struct mei_cl *cl = file->private_data;
cl                697 drivers/misc/mei/main.c 	if (!mei_cl_is_connected(cl))
cl                700 drivers/misc/mei/main.c 	return fasync_helper(fd, file, band, &cl->ev_async);
cl                184 drivers/misc/mei/mei_dev.h 	struct mei_cl *cl;
cl                319 drivers/misc/mei/mei_dev.h ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
cl                321 drivers/misc/mei/mei_dev.h ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
cl                323 drivers/misc/mei/mei_dev.h bool mei_cl_bus_rx_event(struct mei_cl *cl);
cl                324 drivers/misc/mei/mei_dev.h bool mei_cl_bus_notify_event(struct mei_cl *cl);
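
The mei entries above all revolve around one lookup idiom: every HBM message carries a (host_addr, me_addr) pair, and the driver walks dev->file_list until a client matches both addresses. A minimal standalone C sketch of that pattern follows; all struct and field names here are illustrative assumptions, not the driver's real layout.

	#include <stddef.h>

	/* Illustrative stand-ins for struct mei_cl / struct mei_hbm_cl_cmd. */
	struct hbm_cmd { unsigned char host_addr, me_addr; };
	struct client  { unsigned char host_addr, me_addr; struct client *next; };

	/* Match by both addresses, as mei_hbm_cl_addr_equal() does above. */
	static struct client *find_client_by_cmd(struct client *head,
						 const struct hbm_cmd *cmd)
	{
		struct client *cl;

		for (cl = head; cl; cl = cl->next)
			if (cl->host_addr == cmd->host_addr &&
			    cl->me_addr == cmd->me_addr)
				return cl;
		return NULL;	/* no connected client owns this message */
	}
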
cl                104 drivers/mmc/host/sdhci-pci.h #define SDHCI_PCI_DEVICE_CLASS(vend, cl, cl_msk, cfg) { \
cl                107 drivers/mmc/host/sdhci-pci.h 	.class = (cl), .class_mask = (cl_msk), \
cl                576 drivers/mtd/nand/raw/cafe_nand.c 	u8 ah, al, bh, bl, ch, cl;
cl                584 drivers/mtd/nand/raw/cafe_nand.c 	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
cl                586 drivers/mtd/nand/raw/cafe_nand.c 	return (ch << 6) ^ cl;
cl               10368 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	cmd.u.params.cl = class;
cl               3521 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h 			__u8   cl;
cl                416 drivers/net/hamradio/scc.c 		cl(scc, R10, ABUNDER);		/* send CRC */
cl                473 drivers/net/hamradio/scc.c 			cl(scc,R3,ENT_HM|RxENABLE); /* disable the receiver */
cl                721 drivers/net/hamradio/scc.c 	cl(scc,R14,BRENABL);		/* disable baudrate generator */
cl                924 drivers/net/hamradio/scc.c 			cl(scc, R3, RxENABLE|ENT_HM);	/* switch off receiver */
cl                925 drivers/net/hamradio/scc.c 			cl(scc, R15, DCDIE|SYNCIE);	/* No DCD changes, please */
cl                941 drivers/net/hamradio/scc.c 			cl(scc,R5,RTS|TxENAB);
cl                962 drivers/net/hamradio/scc.c 				cl(scc, R3, RxENABLE);
cl                963 drivers/net/hamradio/scc.c 				cl(scc, R15, DCDIE|SYNCIE);
cl                975 drivers/net/hamradio/scc.c 			cl(scc,R5,RTS|TxENAB);		/* disable tx */
cl               1251 drivers/net/hamradio/scc.c 	cl(scc, R1, TxINT_ENAB);	/* force an ABORT, but don't */
cl               1252 drivers/net/hamradio/scc.c 	cl(scc, R15, TxUIE);		/* count it. */
cl               1325 drivers/net/hamradio/scc.c 				cl(scc, R15, DCDIE);
cl               1329 drivers/net/hamradio/scc.c 				cl(scc, R15, SYNCIE);
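
In the hamradio scc.c lines above, cl() is not a client pointer but a clear-bits helper for the Z8530 write registers; only its call sites appear in this index. The following is a hedged sketch of what such a helper typically looks like: the wreg[] shadow array and the wr() writeback are assumptions, motivated by the fact that hardware write registers cannot be read back.

	struct scc_channel { unsigned char wreg[16]; };

	/* Update the shadowed register (the real driver also pokes the chip). */
	static void wr(struct scc_channel *scc, int reg, unsigned char val)
	{
		scc->wreg[reg] = val;
	}

	/* cl(): clear the given bits in the software copy, then write back. */
	static void cl(struct scc_channel *scc, int reg, unsigned char bits)
	{
		wr(scc, reg, scc->wreg[reg] & ~bits);
	}
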
cl                307 drivers/net/usb/cx82310_eth.c #define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
cl                312 drivers/net/usb/cx82310_eth.c 	.bDeviceClass = (cl), \
cl                543 drivers/power/supply/lp8727_charger.c static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                549 drivers/power/supply/lp8727_charger.c 	if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
cl                552 drivers/power/supply/lp8727_charger.c 	if (cl->dev.of_node) {
cl                553 drivers/power/supply/lp8727_charger.c 		pdata = lp8727_parse_dt(&cl->dev);
cl                557 drivers/power/supply/lp8727_charger.c 		pdata = dev_get_platdata(&cl->dev);
cl                560 drivers/power/supply/lp8727_charger.c 	pchg = devm_kzalloc(&cl->dev, sizeof(*pchg), GFP_KERNEL);
cl                564 drivers/power/supply/lp8727_charger.c 	pchg->client = cl;
cl                565 drivers/power/supply/lp8727_charger.c 	pchg->dev = &cl->dev;
cl                567 drivers/power/supply/lp8727_charger.c 	i2c_set_clientdata(cl, pchg);
cl                593 drivers/power/supply/lp8727_charger.c static int lp8727_remove(struct i2c_client *cl)
cl                595 drivers/power/supply/lp8727_charger.c 	struct lp8727_chg *pchg = i2c_get_clientdata(cl);
cl                890 drivers/regulator/lp872x.c static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                900 drivers/regulator/lp872x.c 	if (cl->dev.of_node) {
cl                901 drivers/regulator/lp872x.c 		pdata = lp872x_populate_pdata_from_dt(&cl->dev,
cl                906 drivers/regulator/lp872x.c 		pdata = dev_get_platdata(&cl->dev);
cl                909 drivers/regulator/lp872x.c 	lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
cl                915 drivers/regulator/lp872x.c 	lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
cl                918 drivers/regulator/lp872x.c 		dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
cl                922 drivers/regulator/lp872x.c 	lp->dev = &cl->dev;
cl                925 drivers/regulator/lp872x.c 	i2c_set_clientdata(cl, lp);
cl                264 drivers/remoteproc/stm32_rproc.c static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
cl                266 drivers/remoteproc/stm32_rproc.c 	struct rproc *rproc = dev_get_drvdata(cl->dev);
cl                267 drivers/remoteproc/stm32_rproc.c 	struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
cl                319 drivers/remoteproc/stm32_rproc.c 	struct mbox_client *cl;
cl                327 drivers/remoteproc/stm32_rproc.c 		cl = &ddata->mb[i].client;
cl                328 drivers/remoteproc/stm32_rproc.c 		cl->dev = dev->parent;
cl                330 drivers/remoteproc/stm32_rproc.c 		ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
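
The stm32_rproc entries show the standard mailbox-client callback idiom: the framework hands the callback only the struct mbox_client * that was registered, and the driver recovers its enclosing state with container_of(). A self-contained sketch of that recovery step; everything except the container_of() idiom itself is an illustrative name.

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mbox_client { void *dev; };

	struct my_mbox {
		const char *name;
		struct mbox_client client;	/* embedded, as in stm32_mbox */
	};

	static void my_mb_callback(struct mbox_client *cl, void *data)
	{
		/* Walk back from the embedded member to the wrapper object. */
		struct my_mbox *mb = container_of(cl, struct my_mbox, client);

		(void)mb;
		(void)data;	/* e.g. forward the notification */
	}
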
cl                229 drivers/sbus/char/oradax.c static struct class *cl;
cl                326 drivers/sbus/char/oradax.c 	cl = class_create(THIS_MODULE, DAX_NAME);
cl                327 drivers/sbus/char/oradax.c 	if (IS_ERR(cl)) {
cl                329 drivers/sbus/char/oradax.c 		ret = PTR_ERR(cl);
cl                333 drivers/sbus/char/oradax.c 	if (device_create(cl, NULL, first, NULL, dax_name) == NULL) {
cl                350 drivers/sbus/char/oradax.c 	device_destroy(cl, first);
cl                352 drivers/sbus/char/oradax.c 	class_destroy(cl);
cl                365 drivers/sbus/char/oradax.c 	device_destroy(cl, first);
cl                366 drivers/sbus/char/oradax.c 	class_destroy(cl);
cl               2085 drivers/scsi/megaraid/megaraid_sas.h 	union megasas_evt_class_locale cl;
cl                413 drivers/scsi/megaraid/megaraid_sas_base.c 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
cl                 85 drivers/soc/mediatek/mtk-cmdq-helper.c 	pkt->cl = (void *)client;
cl                105 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
cl                212 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
cl                239 drivers/soc/mediatek/mtk-cmdq-helper.c 	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
cl                 22 drivers/staging/iio/addac/adt7316-i2c.c 	struct i2c_client *cl = client;
cl                 25 drivers/staging/iio/addac/adt7316-i2c.c 	ret = i2c_smbus_write_byte(cl, reg);
cl                 27 drivers/staging/iio/addac/adt7316-i2c.c 		dev_err(&cl->dev, "I2C fail to select reg\n");
cl                 33 drivers/staging/iio/addac/adt7316-i2c.c 		dev_err(&cl->dev, "I2C read error\n");
cl                 44 drivers/staging/iio/addac/adt7316-i2c.c 	struct i2c_client *cl = client;
cl                 47 drivers/staging/iio/addac/adt7316-i2c.c 	ret = i2c_smbus_write_byte_data(cl, reg, data);
cl                 49 drivers/staging/iio/addac/adt7316-i2c.c 		dev_err(&cl->dev, "I2C write error\n");
cl                 56 drivers/staging/iio/addac/adt7316-i2c.c 	struct i2c_client *cl = client;
cl                 63 drivers/staging/iio/addac/adt7316-i2c.c 		ret = adt7316_i2c_read(cl, reg, &data[i]);
cl                 65 drivers/staging/iio/addac/adt7316-i2c.c 			dev_err(&cl->dev, "I2C multi read error\n");
cl                 75 drivers/staging/iio/addac/adt7316-i2c.c 	struct i2c_client *cl = client;
cl                 82 drivers/staging/iio/addac/adt7316-i2c.c 		ret = adt7316_i2c_write(cl, reg, data[i]);
cl                 84 drivers/staging/iio/addac/adt7316-i2c.c 			dev_err(&cl->dev, "I2C multi write error\n");
cl                161 drivers/tty/serial/tegra-tcu.c static void tegra_tcu_receive(struct mbox_client *cl, void *msg)
cl                163 drivers/tty/serial/tegra-tcu.c 	struct tegra_tcu *tcu = container_of(cl, struct tegra_tcu, rx_client);
cl                113 drivers/usb/storage/usb.c #define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
cl                 30 drivers/usb/storage/usual-tables.c #define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
cl                 37 drivers/usb/storage/usual-tables.c 	.bInterfaceClass = (cl), \
cl                244 drivers/video/backlight/arcxcnn_bl.c static int arcxcnn_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                249 drivers/video/backlight/arcxcnn_bl.c 	if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
cl                252 drivers/video/backlight/arcxcnn_bl.c 	lp = devm_kzalloc(&cl->dev, sizeof(*lp), GFP_KERNEL);
cl                256 drivers/video/backlight/arcxcnn_bl.c 	lp->client = cl;
cl                257 drivers/video/backlight/arcxcnn_bl.c 	lp->dev = &cl->dev;
cl                258 drivers/video/backlight/arcxcnn_bl.c 	lp->pdata = dev_get_platdata(&cl->dev);
cl                301 drivers/video/backlight/arcxcnn_bl.c 	i2c_set_clientdata(cl, lp);
cl                365 drivers/video/backlight/arcxcnn_bl.c static int arcxcnn_remove(struct i2c_client *cl)
cl                367 drivers/video/backlight/arcxcnn_bl.c 	struct arcxcnn *lp = i2c_get_clientdata(cl);
cl                398 drivers/video/backlight/lp855x_bl.c static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
cl                403 drivers/video/backlight/lp855x_bl.c 	if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
cl                406 drivers/video/backlight/lp855x_bl.c 	lp = devm_kzalloc(&cl->dev, sizeof(struct lp855x), GFP_KERNEL);
cl                410 drivers/video/backlight/lp855x_bl.c 	lp->client = cl;
cl                411 drivers/video/backlight/lp855x_bl.c 	lp->dev = &cl->dev;
cl                414 drivers/video/backlight/lp855x_bl.c 	lp->pdata = dev_get_platdata(&cl->dev);
cl                450 drivers/video/backlight/lp855x_bl.c 			dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
cl                469 drivers/video/backlight/lp855x_bl.c 	i2c_set_clientdata(cl, lp);
cl                494 drivers/video/backlight/lp855x_bl.c static int lp855x_remove(struct i2c_client *cl)
cl                496 drivers/video/backlight/lp855x_bl.c 	struct lp855x *lp = i2c_get_clientdata(cl);
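
The lp8727, lp872x, arcxcnn and lp855x entries all follow the same I2C driver shape: probe() allocates per-device state against &cl->dev and stashes it on the client with i2c_set_clientdata(), so remove() can fetch it again. A minimal user-space sketch of that pairing, with the two helpers reimplemented locally for illustration:

	#include <stdlib.h>

	struct i2c_client { void *clientdata; };

	static void i2c_set_clientdata(struct i2c_client *cl, void *data)
	{
		cl->clientdata = data;
	}

	static void *i2c_get_clientdata(const struct i2c_client *cl)
	{
		return cl->clientdata;
	}

	struct my_chip { struct i2c_client *client; };

	static int my_probe(struct i2c_client *cl)
	{
		struct my_chip *chip = calloc(1, sizeof(*chip));

		if (!chip)
			return -1;	/* -ENOMEM in the kernel */
		chip->client = cl;
		i2c_set_clientdata(cl, chip);	/* remove() retrieves it here */
		return 0;
	}

	static int my_remove(struct i2c_client *cl)
	{
		free(i2c_get_clientdata(cl));
		return 0;
	}
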
cl               4030 fs/btrfs/tree-log.c 				u64 ds, dl, cs, cl;
cl               4040 fs/btrfs/tree-log.c 				cl = btrfs_file_extent_num_bytes(src,
cl               4045 fs/btrfs/tree-log.c 					cl = dl;
cl               4050 fs/btrfs/tree-log.c 						ds + cs, ds + cs + cl - 1,
cl                108 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(item);
cl                109 fs/dlm/config.c 	return sprintf(buf, "%s\n", cl->cl_cluster_name);
cl                115 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(item);
cl                119 fs/dlm/config.c 	strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name));
cl                125 fs/dlm/config.c static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
cl                151 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(item);		      \
cl                152 fs/dlm/config.c 	return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name,         \
cl                157 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(item);		      \
cl                158 fs/dlm/config.c 	return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name);               \
cl                345 fs/dlm/config.c 	struct dlm_cluster *cl = NULL;
cl                349 fs/dlm/config.c 	cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
cl                353 fs/dlm/config.c 	if (!cl || !sps || !cms)
cl                356 fs/dlm/config.c 	config_group_init_type_name(&cl->group, name, &cluster_type);
cl                360 fs/dlm/config.c 	configfs_add_default_group(&sps->ss_group, &cl->group);
cl                361 fs/dlm/config.c 	configfs_add_default_group(&cms->cs_group, &cl->group);
cl                363 fs/dlm/config.c 	cl->cl_tcp_port = dlm_config.ci_tcp_port;
cl                364 fs/dlm/config.c 	cl->cl_buffer_size = dlm_config.ci_buffer_size;
cl                365 fs/dlm/config.c 	cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
cl                366 fs/dlm/config.c 	cl->cl_recover_timer = dlm_config.ci_recover_timer;
cl                367 fs/dlm/config.c 	cl->cl_toss_secs = dlm_config.ci_toss_secs;
cl                368 fs/dlm/config.c 	cl->cl_scan_secs = dlm_config.ci_scan_secs;
cl                369 fs/dlm/config.c 	cl->cl_log_debug = dlm_config.ci_log_debug;
cl                370 fs/dlm/config.c 	cl->cl_log_info = dlm_config.ci_log_info;
cl                371 fs/dlm/config.c 	cl->cl_protocol = dlm_config.ci_protocol;
cl                372 fs/dlm/config.c 	cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
cl                373 fs/dlm/config.c 	cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
cl                374 fs/dlm/config.c 	cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
cl                375 fs/dlm/config.c 	cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
cl                376 fs/dlm/config.c 	memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
cl                381 fs/dlm/config.c 	return &cl->group;
cl                384 fs/dlm/config.c 	kfree(cl);
cl                392 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(i);
cl                394 fs/dlm/config.c 	configfs_remove_default_groups(&cl->group);
cl                404 fs/dlm/config.c 	struct dlm_cluster *cl = config_item_to_cluster(i);
cl                405 fs/dlm/config.c 	kfree(cl);
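
The fs/dlm/config.c lines are mostly the body of a per-attribute macro: for each tunable, a store path writes cl->cl_<name> and a show path prints it with snprintf("%u\n", ...). A hedged sketch of that macro shape; CLUSTER_ATTR below is an illustrative reconstruction, not the exact kernel macro.

	#include <stdio.h>

	struct dlm_cluster { unsigned int cl_tcp_port; };

	/* Generate a show/store pair per field, as the listing suggests. */
	#define CLUSTER_ATTR(name)						\
	static int name##_store(struct dlm_cluster *cl, unsigned int v)	\
	{									\
		cl->cl_##name = v;						\
		return 0;							\
	}									\
	static int name##_show(const struct dlm_cluster *cl, char *buf,	\
			       size_t len)					\
	{									\
		return snprintf(buf, len, "%u\n", cl->cl_##name);		\
	}

	CLUSTER_ATTR(tcp_port)
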
cl                 60 fs/erofs/zdata.c 	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
cl                 63 fs/erofs/zdata.c 	mutex_init(&cl->lock);
cl                 64 fs/erofs/zdata.c 	cl->nr_pages = 0;
cl                 65 fs/erofs/zdata.c 	cl->vcnt = 0;
cl                 72 fs/erofs/zdata.c 	struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
cl                 76 fs/erofs/zdata.c 	DBG_BUGON(cl->nr_pages);
cl                 77 fs/erofs/zdata.c 	DBG_BUGON(cl->vcnt);
cl                135 fs/erofs/zdata.c 	struct z_erofs_collection *cl;
cl                305 fs/erofs/zdata.c 	clt->cl->vcnt += (unsigned int)ret;
cl                346 fs/erofs/zdata.c 	struct z_erofs_collection *cl;
cl                361 fs/erofs/zdata.c 	cl = z_erofs_primarycollection(pcl);
cl                362 fs/erofs/zdata.c 	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
cl                387 fs/erofs/zdata.c 	mutex_lock(&cl->lock);
cl                396 fs/erofs/zdata.c 	clt->cl = cl;
cl                397 fs/erofs/zdata.c 	return cl;
cl                405 fs/erofs/zdata.c 	struct z_erofs_collection *cl;
cl                432 fs/erofs/zdata.c 	cl = z_erofs_primarycollection(pcl);
cl                433 fs/erofs/zdata.c 	cl->pageofs = map->m_la & ~PAGE_MASK;
cl                439 fs/erofs/zdata.c 	mutex_trylock(&cl->lock);
cl                443 fs/erofs/zdata.c 		mutex_unlock(&cl->lock);
cl                452 fs/erofs/zdata.c 	clt->cl = cl;
cl                453 fs/erofs/zdata.c 	return cl;
cl                460 fs/erofs/zdata.c 	struct z_erofs_collection *cl;
cl                462 fs/erofs/zdata.c 	DBG_BUGON(clt->cl);
cl                474 fs/erofs/zdata.c 	cl = cllookup(clt, inode, map);
cl                475 fs/erofs/zdata.c 	if (!cl) {
cl                476 fs/erofs/zdata.c 		cl = clregister(clt, inode, map);
cl                478 fs/erofs/zdata.c 		if (cl == ERR_PTR(-EAGAIN))
cl                482 fs/erofs/zdata.c 	if (IS_ERR(cl))
cl                483 fs/erofs/zdata.c 		return PTR_ERR(cl);
cl                486 fs/erofs/zdata.c 				  cl->pagevec, cl->vcnt);
cl                500 fs/erofs/zdata.c 	struct z_erofs_collection *const cl =
cl                504 fs/erofs/zdata.c 			container_of(cl, struct z_erofs_pcluster,
cl                512 fs/erofs/zdata.c 	struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
cl                514 fs/erofs/zdata.c 	call_rcu(&cl->rcu, z_erofs_rcu_callback);
cl                517 fs/erofs/zdata.c static void z_erofs_collection_put(struct z_erofs_collection *cl)
cl                520 fs/erofs/zdata.c 		container_of(cl, struct z_erofs_pcluster, primary_collection);
cl                527 fs/erofs/zdata.c 	struct z_erofs_collection *cl = clt->cl;
cl                529 fs/erofs/zdata.c 	if (!cl)
cl                533 fs/erofs/zdata.c 	mutex_unlock(&cl->lock);
cl                540 fs/erofs/zdata.c 		z_erofs_collection_put(cl);
cl                542 fs/erofs/zdata.c 	clt->cl = NULL;
cl                597 fs/erofs/zdata.c 		if (!clt->cl)
cl                679 fs/erofs/zdata.c 	clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
cl                768 fs/erofs/zdata.c 	struct z_erofs_collection *cl;
cl                772 fs/erofs/zdata.c 	cl = z_erofs_primarycollection(pcl);
cl                773 fs/erofs/zdata.c 	DBG_BUGON(!READ_ONCE(cl->nr_pages));
cl                775 fs/erofs/zdata.c 	mutex_lock(&cl->lock);
cl                776 fs/erofs/zdata.c 	nr_pages = cl->nr_pages;
cl                804 fs/erofs/zdata.c 				  cl->pagevec, 0);
cl                806 fs/erofs/zdata.c 	for (i = 0; i < cl->vcnt; ++i) {
cl                887 fs/erofs/zdata.c 	if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
cl                891 fs/erofs/zdata.c 		outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
cl                899 fs/erofs/zdata.c 					.pageofs_out = cl->pageofs,
cl                943 fs/erofs/zdata.c 	cl->nr_pages = 0;
cl                944 fs/erofs/zdata.c 	cl->vcnt = 0;
cl                950 fs/erofs/zdata.c 	mutex_unlock(&cl->lock);
cl                952 fs/erofs/zdata.c 	z_erofs_collection_put(cl);
cl                679 fs/nfsd/nfs4state.c 	struct nfs4_client *cl;
cl                684 fs/nfsd/nfs4state.c 	cl = new->co_client;
cl                688 fs/nfsd/nfs4state.c 		if (co->co_client == cl) {
cl                701 fs/nfsd/nfs4state.c struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
cl                712 fs/nfsd/nfs4state.c 	spin_lock(&cl->cl_lock);
cl                714 fs/nfsd/nfs4state.c 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
cl                715 fs/nfsd/nfs4state.c 	spin_unlock(&cl->cl_lock);
cl                721 fs/nfsd/nfs4state.c 	stid->sc_client = cl;
cl                723 fs/nfsd/nfs4state.c 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
cl               2194 fs/nfsd/nfs4state.c bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
cl               2198 fs/nfsd/nfs4state.c 	if (!cl->cl_mach_cred)
cl               2200 fs/nfsd/nfs4state.c 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
cl               2204 fs/nfsd/nfs4state.c 	if (cl->cl_cred.cr_raw_principal)
cl               2205 fs/nfsd/nfs4state.c 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
cl               2209 fs/nfsd/nfs4state.c 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
cl               2233 fs/nfsd/nfs4state.c find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
cl               2237 fs/nfsd/nfs4state.c 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
cl               2244 fs/nfsd/nfs4state.c find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
cl               2248 fs/nfsd/nfs4state.c 	spin_lock(&cl->cl_lock);
cl               2249 fs/nfsd/nfs4state.c 	s = find_stateid_locked(cl, t);
cl               2256 fs/nfsd/nfs4state.c 	spin_unlock(&cl->cl_lock);
cl               4604 fs/nfsd/nfs4state.c static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
cl               4608 fs/nfsd/nfs4state.c 	ret = find_stateid_by_type(cl, s,
cl               4622 fs/nfsd/nfs4state.c nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
cl               4629 fs/nfsd/nfs4state.c 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
cl               4634 fs/nfsd/nfs4state.c 		if (cl->cl_minorversion)
cl               5003 fs/nfsd/nfs4state.c 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
cl               5017 fs/nfsd/nfs4state.c 		status = nfs4_check_deleg(cl, open, &dp);
cl               5459 fs/nfsd/nfs4state.c static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
cl               5468 fs/nfsd/nfs4state.c 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
cl               5470 fs/nfsd/nfs4state.c 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
cl               5476 fs/nfsd/nfs4state.c 	spin_lock(&cl->cl_lock);
cl               5477 fs/nfsd/nfs4state.c 	s = find_stateid_locked(cl, stateid);
cl               5502 fs/nfsd/nfs4state.c 	spin_unlock(&cl->cl_lock);
cl               5675 fs/nfsd/nfs4state.c 	struct nfs4_client *cl = cstate->session->se_client;
cl               5679 fs/nfsd/nfs4state.c 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
cl               5721 fs/nfsd/nfs4state.c 	struct nfs4_client *cl = cstate->session->se_client;
cl               5724 fs/nfsd/nfs4state.c 	spin_lock(&cl->cl_lock);
cl               5725 fs/nfsd/nfs4state.c 	s = find_stateid_locked(cl, stateid);
cl               5742 fs/nfsd/nfs4state.c 		spin_unlock(&cl->cl_lock);
cl               5749 fs/nfsd/nfs4state.c 		spin_unlock(&cl->cl_lock);
cl               5757 fs/nfsd/nfs4state.c 	spin_unlock(&cl->cl_lock);
cl               6373 fs/nfsd/nfs4state.c 	struct nfs4_client *cl = oo->oo_owner.so_client;
cl               6379 fs/nfsd/nfs4state.c 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
cl               6382 fs/nfsd/nfs4state.c 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
cl                624 fs/nfsd/state.h struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
cl                731 fs/nfsd/xdr4.h bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
cl                558 fs/nls/mac-celtic.c 	unsigned char cl = uni & 0x00ff;
cl                565 fs/nls/mac-celtic.c 	if (uni2charset && uni2charset[cl])
cl                566 fs/nls/mac-celtic.c 		out[0] = uni2charset[cl];
cl                488 fs/nls/mac-centeuro.c 	unsigned char cl = uni & 0x00ff;
cl                495 fs/nls/mac-centeuro.c 	if (uni2charset && uni2charset[cl])
cl                496 fs/nls/mac-centeuro.c 		out[0] = uni2charset[cl];
cl                558 fs/nls/mac-croatian.c 	unsigned char cl = uni & 0x00ff;
cl                565 fs/nls/mac-croatian.c 	if (uni2charset && uni2charset[cl])
cl                566 fs/nls/mac-croatian.c 		out[0] = uni2charset[cl];
cl                453 fs/nls/mac-cyrillic.c 	unsigned char cl = uni & 0x00ff;
cl                460 fs/nls/mac-cyrillic.c 	if (uni2charset && uni2charset[cl])
cl                461 fs/nls/mac-cyrillic.c 		out[0] = uni2charset[cl];
cl                523 fs/nls/mac-gaelic.c 	unsigned char cl = uni & 0x00ff;
cl                530 fs/nls/mac-gaelic.c 	if (uni2charset && uni2charset[cl])
cl                531 fs/nls/mac-gaelic.c 		out[0] = uni2charset[cl];
cl                453 fs/nls/mac-greek.c 	unsigned char cl = uni & 0x00ff;
cl                460 fs/nls/mac-greek.c 	if (uni2charset && uni2charset[cl])
cl                461 fs/nls/mac-greek.c 		out[0] = uni2charset[cl];
cl                558 fs/nls/mac-iceland.c 	unsigned char cl = uni & 0x00ff;
cl                565 fs/nls/mac-iceland.c 	if (uni2charset && uni2charset[cl])
cl                566 fs/nls/mac-iceland.c 		out[0] = uni2charset[cl];
cl                488 fs/nls/mac-inuit.c 	unsigned char cl = uni & 0x00ff;
cl                495 fs/nls/mac-inuit.c 	if (uni2charset && uni2charset[cl])
cl                496 fs/nls/mac-inuit.c 		out[0] = uni2charset[cl];
cl                593 fs/nls/mac-roman.c 	unsigned char cl = uni & 0x00ff;
cl                600 fs/nls/mac-roman.c 	if (uni2charset && uni2charset[cl])
cl                601 fs/nls/mac-roman.c 		out[0] = uni2charset[cl];
cl                558 fs/nls/mac-romanian.c 	unsigned char cl = uni & 0x00ff;
cl                565 fs/nls/mac-romanian.c 	if (uni2charset && uni2charset[cl])
cl                566 fs/nls/mac-romanian.c 		out[0] = uni2charset[cl];
cl                558 fs/nls/mac-turkish.c 	unsigned char cl = uni & 0x00ff;
cl                565 fs/nls/mac-turkish.c 	if (uni2charset && uni2charset[cl])
cl                566 fs/nls/mac-turkish.c 		out[0] = uni2charset[cl];
cl                123 fs/nls/nls_ascii.c 	unsigned char cl = uni & 0x00ff;
cl                130 fs/nls/nls_ascii.c 	if (uni2charset && uni2charset[cl])
cl                131 fs/nls/nls_ascii.c 		out[0] = uni2charset[cl];
cl                501 fs/nls/nls_base.c 	unsigned char cl = uni & 0x00ff;
cl                508 fs/nls/nls_base.c 	if (uni2charset && uni2charset[cl])
cl                509 fs/nls/nls_base.c 		out[0] = uni2charset[cl];
cl                304 fs/nls/nls_cp1250.c         unsigned char cl = uni & 0x00ff;
cl                311 fs/nls/nls_cp1250.c         if (uni2charset && uni2charset[cl])
cl                312 fs/nls/nls_cp1250.c                 out[0] = uni2charset[cl];
cl                258 fs/nls/nls_cp1251.c 	unsigned char cl = uni & 0x00ff;
cl                265 fs/nls/nls_cp1251.c 	if (uni2charset && uni2charset[cl])
cl                266 fs/nls/nls_cp1251.c 		out[0] = uni2charset[cl];
cl                339 fs/nls/nls_cp1255.c 	unsigned char cl = uni & 0x00ff;
cl                346 fs/nls/nls_cp1255.c 	if (uni2charset && uni2charset[cl])
cl                347 fs/nls/nls_cp1255.c 		out[0] = uni2charset[cl];
cl                344 fs/nls/nls_cp437.c 	unsigned char cl = uni & 0x00ff;
cl                351 fs/nls/nls_cp437.c 	if (uni2charset && uni2charset[cl])
cl                352 fs/nls/nls_cp437.c 		out[0] = uni2charset[cl];
cl                307 fs/nls/nls_cp737.c 	unsigned char cl = uni & 0x00ff;
cl                314 fs/nls/nls_cp737.c 	if (uni2charset && uni2charset[cl])
cl                315 fs/nls/nls_cp737.c 		out[0] = uni2charset[cl];
cl                276 fs/nls/nls_cp775.c 	unsigned char cl = uni & 0x00ff;
cl                283 fs/nls/nls_cp775.c 	if (uni2charset && uni2charset[cl])
cl                284 fs/nls/nls_cp775.c 		out[0] = uni2charset[cl];
cl                272 fs/nls/nls_cp850.c 	unsigned char cl = uni & 0x00ff;
cl                279 fs/nls/nls_cp850.c 	if (uni2charset && uni2charset[cl])
cl                280 fs/nls/nls_cp850.c 		out[0] = uni2charset[cl];
cl                294 fs/nls/nls_cp852.c 	unsigned char cl = uni & 0x00ff;
cl                301 fs/nls/nls_cp852.c 	if (uni2charset && uni2charset[cl])
cl                302 fs/nls/nls_cp852.c 		out[0] = uni2charset[cl];
cl                256 fs/nls/nls_cp855.c 	unsigned char cl = uni & 0x00ff;
cl                263 fs/nls/nls_cp855.c 	if (uni2charset && uni2charset[cl])
cl                264 fs/nls/nls_cp855.c 		out[0] = uni2charset[cl];
cl                258 fs/nls/nls_cp857.c 	unsigned char cl = uni & 0x00ff;
cl                265 fs/nls/nls_cp857.c 	if (uni2charset && uni2charset[cl])
cl                266 fs/nls/nls_cp857.c 		out[0] = uni2charset[cl];
cl                321 fs/nls/nls_cp860.c 	unsigned char cl = uni & 0x00ff;
cl                328 fs/nls/nls_cp860.c 	if (uni2charset && uni2charset[cl])
cl                329 fs/nls/nls_cp860.c 		out[0] = uni2charset[cl];
cl                344 fs/nls/nls_cp861.c 	unsigned char cl = uni & 0x00ff;
cl                351 fs/nls/nls_cp861.c 	if (uni2charset && uni2charset[cl])
cl                352 fs/nls/nls_cp861.c 		out[0] = uni2charset[cl];
cl                378 fs/nls/nls_cp862.c 	unsigned char cl = uni & 0x00ff;
cl                385 fs/nls/nls_cp862.c 	if (uni2charset && uni2charset[cl])
cl                386 fs/nls/nls_cp862.c 		out[0] = uni2charset[cl];
cl                338 fs/nls/nls_cp863.c 	unsigned char cl = uni & 0x00ff;
cl                345 fs/nls/nls_cp863.c 	if (uni2charset && uni2charset[cl])
cl                346 fs/nls/nls_cp863.c 		out[0] = uni2charset[cl];
cl                364 fs/nls/nls_cp864.c 	unsigned char cl = uni & 0x00ff;
cl                371 fs/nls/nls_cp864.c 	if (uni2charset && uni2charset[cl])
cl                372 fs/nls/nls_cp864.c 		out[0] = uni2charset[cl];
cl                344 fs/nls/nls_cp865.c 	unsigned char cl = uni & 0x00ff;
cl                351 fs/nls/nls_cp865.c 	if (uni2charset && uni2charset[cl])
cl                352 fs/nls/nls_cp865.c 		out[0] = uni2charset[cl];
cl                262 fs/nls/nls_cp866.c 	unsigned char cl = uni & 0x00ff;
cl                269 fs/nls/nls_cp866.c 	if (uni2charset && uni2charset[cl])
cl                270 fs/nls/nls_cp866.c 		out[0] = uni2charset[cl];
cl                272 fs/nls/nls_cp869.c 	unsigned char cl = uni & 0x00ff;
cl                279 fs/nls/nls_cp869.c 	if (uni2charset && uni2charset[cl])
cl                280 fs/nls/nls_cp869.c 		out[0] = uni2charset[cl];
cl                230 fs/nls/nls_cp874.c 	unsigned char cl = uni & 0x00ff;
cl                237 fs/nls/nls_cp874.c 	if (uni2charset && uni2charset[cl])
cl                238 fs/nls/nls_cp874.c 		out[0] = uni2charset[cl];
cl               7841 fs/nls/nls_cp932.c 	unsigned char cl = uni&0xFF;
cl               7847 fs/nls/nls_cp932.c 	if (ch == 0xFF && 0x61 <= cl && cl <= 0x9F) {
cl               7848 fs/nls/nls_cp932.c 		out[0] = cl + 0x40;
cl               7856 fs/nls/nls_cp932.c 		out[0] = uni2charset[cl*2];
cl               7857 fs/nls/nls_cp932.c 		out[1] = uni2charset[cl*2+1];
cl               7862 fs/nls/nls_cp932.c 		if (cl <= 0x7F) {
cl               7863 fs/nls/nls_cp932.c 			out[0] = cl;
cl               7865 fs/nls/nls_cp932.c 		} else if (0xA0 <= cl) {
cl               7866 fs/nls/nls_cp932.c 			out[0] = u2c_00hi[cl - 0xA0][0];
cl               7867 fs/nls/nls_cp932.c 			out[1] = u2c_00hi[cl - 0xA0][1];
cl               7880 fs/nls/nls_cp932.c 	unsigned char ch, cl;
cl               7898 fs/nls/nls_cp932.c 	cl = rawstring[1];
cl               7900 fs/nls/nls_cp932.c 	if (charset2uni && cl) {
cl               7901 fs/nls/nls_cp932.c 		*uni = charset2uni[cl];
cl               11004 fs/nls/nls_cp936.c 	unsigned char cl = uni&0xFF;
cl               11018 fs/nls/nls_cp936.c 		out0 = u2c_00[cl*2];
cl               11019 fs/nls/nls_cp936.c 		out1 = u2c_00[cl*2+1];
cl               11021 fs/nls/nls_cp936.c 			if (cl<0x80) {
cl               11022 fs/nls/nls_cp936.c 				out[0] = cl;
cl               11039 fs/nls/nls_cp936.c 		out[0] = uni2charset[cl*2];
cl               11040 fs/nls/nls_cp936.c 		out[1] = uni2charset[cl*2+1];
cl               11052 fs/nls/nls_cp936.c 	unsigned char ch, cl;
cl               11069 fs/nls/nls_cp936.c 	cl = rawstring[1];
cl               11072 fs/nls/nls_cp936.c 	if (charset2uni && cl) {
cl               11073 fs/nls/nls_cp936.c 		*uni = charset2uni[cl];
cl               13865 fs/nls/nls_cp949.c 	unsigned char cl = uni&0xFF;
cl               13877 fs/nls/nls_cp949.c 		out[0] = uni2charset[cl*2];
cl               13878 fs/nls/nls_cp949.c 		out[1] = uni2charset[cl*2+1];
cl               13882 fs/nls/nls_cp949.c 	} else if (ch==0 && cl) {
cl               13883 fs/nls/nls_cp949.c 		out[0] = cl;
cl               13895 fs/nls/nls_cp949.c 	unsigned char ch, cl;
cl               13908 fs/nls/nls_cp949.c 	cl = rawstring[1];
cl               13911 fs/nls/nls_cp949.c 	if (charset2uni && cl) {
cl               13912 fs/nls/nls_cp949.c 		*uni = charset2uni[cl];
cl               9401 fs/nls/nls_cp950.c 	unsigned char cl = uni&0xFF;
cl               9413 fs/nls/nls_cp950.c 		out[0] = uni2charset[cl*2];
cl               9414 fs/nls/nls_cp950.c 		out[1] = uni2charset[cl*2+1];
cl               9418 fs/nls/nls_cp950.c 	} else if (ch==0 && cl) {
cl               9419 fs/nls/nls_cp950.c 		out[0] = cl;
cl               9431 fs/nls/nls_cp950.c 	unsigned char ch, cl;
cl               9444 fs/nls/nls_cp950.c 	cl = rawstring[1];
cl               9447 fs/nls/nls_cp950.c 	if (charset2uni && cl) {
cl               9448 fs/nls/nls_cp950.c 		*uni = charset2uni[cl];
cl                439 fs/nls/nls_euc-jp.c 			unsigned char ch, cl;
cl                444 fs/nls/nls_euc-jp.c 			n = 3; ch = out[0]; cl = out[1];
cl                446 fs/nls/nls_euc-jp.c 			MAP_SJIS2EUC(ch, cl, 0xF5, out[1], out[2], 0xF5);
cl                214 fs/nls/nls_iso8859-1.c 	unsigned char cl = uni & 0x00ff;
cl                221 fs/nls/nls_iso8859-1.c 	if (uni2charset && uni2charset[cl])
cl                222 fs/nls/nls_iso8859-1.c 		out[0] = uni2charset[cl];
cl                242 fs/nls/nls_iso8859-13.c 	unsigned char cl = uni & 0x00ff;
cl                249 fs/nls/nls_iso8859-13.c 	if (uni2charset && uni2charset[cl])
cl                250 fs/nls/nls_iso8859-13.c 		out[0] = uni2charset[cl];
cl                298 fs/nls/nls_iso8859-14.c 	unsigned char cl = uni & 0x00ff;
cl                305 fs/nls/nls_iso8859-14.c 	if (uni2charset && uni2charset[cl])
cl                306 fs/nls/nls_iso8859-14.c 		out[0] = uni2charset[cl];
cl                264 fs/nls/nls_iso8859-15.c 	unsigned char cl = uni & 0x00ff;
cl                271 fs/nls/nls_iso8859-15.c 	if (uni2charset && uni2charset[cl])
cl                272 fs/nls/nls_iso8859-15.c 		out[0] = uni2charset[cl];
cl                265 fs/nls/nls_iso8859-2.c 	unsigned char cl = uni & 0x00ff;
cl                272 fs/nls/nls_iso8859-2.c 	if (uni2charset && uni2charset[cl])
cl                273 fs/nls/nls_iso8859-2.c 		out[0] = uni2charset[cl];
cl                265 fs/nls/nls_iso8859-3.c 	unsigned char cl = uni & 0x00ff;
cl                272 fs/nls/nls_iso8859-3.c 	if (uni2charset && uni2charset[cl])
cl                273 fs/nls/nls_iso8859-3.c 		out[0] = uni2charset[cl];
cl                265 fs/nls/nls_iso8859-4.c 	unsigned char cl = uni & 0x00ff;
cl                272 fs/nls/nls_iso8859-4.c 	if (uni2charset && uni2charset[cl])
cl                273 fs/nls/nls_iso8859-4.c 		out[0] = uni2charset[cl];
cl                229 fs/nls/nls_iso8859-5.c 	unsigned char cl = uni & 0x00ff;
cl                236 fs/nls/nls_iso8859-5.c 	if (uni2charset && uni2charset[cl])
cl                237 fs/nls/nls_iso8859-5.c 		out[0] = uni2charset[cl];
cl                220 fs/nls/nls_iso8859-6.c 	unsigned char cl = uni & 0x00ff;
cl                227 fs/nls/nls_iso8859-6.c 	if (uni2charset && uni2charset[cl])
cl                228 fs/nls/nls_iso8859-6.c 		out[0] = uni2charset[cl];
cl                274 fs/nls/nls_iso8859-7.c 	unsigned char cl = uni & 0x00ff;
cl                281 fs/nls/nls_iso8859-7.c 	if (uni2charset && uni2charset[cl])
cl                282 fs/nls/nls_iso8859-7.c 		out[0] = uni2charset[cl];
cl                229 fs/nls/nls_iso8859-9.c 	unsigned char cl = uni & 0x00ff;
cl                236 fs/nls/nls_iso8859-9.c 	if (uni2charset && uni2charset[cl])
cl                237 fs/nls/nls_iso8859-9.c 		out[0] = uni2charset[cl];
cl                280 fs/nls/nls_koi8-r.c 	unsigned char cl = uni & 0x00ff;
cl                287 fs/nls/nls_koi8-r.c 	if (uni2charset && uni2charset[cl])
cl                288 fs/nls/nls_koi8-r.c 		out[0] = uni2charset[cl];
cl                287 fs/nls/nls_koi8-u.c 	unsigned char cl = uni & 0x00ff;
cl                294 fs/nls/nls_koi8-u.c 	if (uni2charset && uni2charset[cl])
cl                295 fs/nls/nls_koi8-u.c 		out[0] = uni2charset[cl];
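
Across all the fs/nls tables above, cl is the low byte of a Unicode code point: the high byte selects a 256-entry page table and cl indexes into it, with a zero entry meaning "no mapping". The single-byte case condenses to a sketch like the following, where page_uni2charset stands in for the generated per-charset tables:

	static const unsigned char *page_uni2charset[256];

	static int uni2char(unsigned short uni, unsigned char *out)
	{
		unsigned char ch = (uni >> 8) & 0xff;	/* page selector */
		unsigned char cl = uni & 0xff;		/* index into page */
		const unsigned char *uni2charset = page_uni2charset[ch];

		if (uni2charset && uni2charset[cl]) {
			out[0] = uni2charset[cl];
			return 1;	/* one output byte produced */
		}
		return -1;		/* -EINVAL in the kernel proper */
	}

The double-byte charsets (cp932, cp936, cp949, cp950) use the same high/low split but index uni2charset[cl*2] and uni2charset[cl*2+1] to emit two output bytes.
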
cl                539 fs/ocfs2/ioctl.c 	struct ocfs2_chain_list *cl = NULL;
cl                562 fs/ocfs2/ioctl.c 	cl = &(gb_dinode->id2.i_chain);
cl                568 fs/ocfs2/ioctl.c 	if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
cl                581 fs/ocfs2/ioctl.c 	chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;
cl                583 fs/ocfs2/ioctl.c 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
cl                584 fs/ocfs2/ioctl.c 		rec = &(cl->cl_recs[i]);
cl                385 fs/ocfs2/move_extents.c 	struct ocfs2_chain_list *cl;
cl                405 fs/ocfs2/move_extents.c 	cl = &(ac_dinode->id2.i_chain);
cl                406 fs/ocfs2/move_extents.c 	rec = &(cl->cl_recs[0]);
cl                421 fs/ocfs2/move_extents.c 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
cl                423 fs/ocfs2/move_extents.c 		rec = &(cl->cl_recs[i]);
cl                 89 fs/ocfs2/resize.c 	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
cl                 93 fs/ocfs2/resize.c 	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
cl                 94 fs/ocfs2/resize.c 	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
cl                138 fs/ocfs2/resize.c 	cr = (&cl->cl_recs[chain]);
cl                458 fs/ocfs2/resize.c 	struct ocfs2_chain_list *cl;
cl                520 fs/ocfs2/resize.c 	cl = &fe->id2.i_chain;
cl                521 fs/ocfs2/resize.c 	cr = &cl->cl_recs[input->chain];
cl                543 fs/ocfs2/resize.c 	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
cl                544 fs/ocfs2/resize.c 		le16_add_cpu(&cl->cl_next_free_rec, 1);
cl                 68 fs/ocfs2/suballoc.c static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
cl                 75 fs/ocfs2/suballoc.c 				  struct ocfs2_chain_list *cl);
cl                146 fs/ocfs2/suballoc.c static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
cl                148 fs/ocfs2/suballoc.c 	return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
cl                322 fs/ocfs2/suballoc.c 					  struct ocfs2_chain_list *cl,
cl                334 fs/ocfs2/suballoc.c 				  le16_to_cpu(cl->cl_bpc));
cl                336 fs/ocfs2/suballoc.c 	le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
cl                338 fs/ocfs2/suballoc.c 		     clusters * le16_to_cpu(cl->cl_bpc));
cl                348 fs/ocfs2/suballoc.c 				  struct ocfs2_chain_list *cl)
cl                378 fs/ocfs2/suballoc.c 	bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
cl                381 fs/ocfs2/suballoc.c 	if (group_clusters == le16_to_cpu(cl->cl_cpg))
cl                382 fs/ocfs2/suballoc.c 		bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
cl                384 fs/ocfs2/suballoc.c 		ocfs2_bg_discontig_add_extent(osb, bg, cl, group_blkno,
cl                404 fs/ocfs2/suballoc.c static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
cl                409 fs/ocfs2/suballoc.c 	while (curr < le16_to_cpu(cl->cl_count)) {
cl                410 fs/ocfs2/suballoc.c 		if (le32_to_cpu(cl->cl_recs[best].c_total) >
cl                411 fs/ocfs2/suballoc.c 		    le32_to_cpu(cl->cl_recs[curr].c_total))
cl                422 fs/ocfs2/suballoc.c 			       struct ocfs2_chain_list *cl)
cl                428 fs/ocfs2/suballoc.c 	unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
cl                431 fs/ocfs2/suballoc.c 				      le16_to_cpu(cl->cl_cpg), &bit_off,
cl                453 fs/ocfs2/suballoc.c 					bg_blkno, num_bits, alloc_rec, cl);
cl                487 fs/ocfs2/suballoc.c 					    struct ocfs2_chain_list *cl,
cl                494 fs/ocfs2/suballoc.c 	unsigned int needed = le16_to_cpu(cl->cl_cpg) -
cl                495 fs/ocfs2/suballoc.c 			 le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
cl                522 fs/ocfs2/suballoc.c 		ocfs2_bg_discontig_add_extent(osb, bg, cl, p_blkno,
cl                526 fs/ocfs2/suballoc.c 		needed = le16_to_cpu(cl->cl_cpg) -
cl                527 fs/ocfs2/suballoc.c 			 le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
cl                579 fs/ocfs2/suballoc.c 				  struct ocfs2_chain_list *cl)
cl                584 fs/ocfs2/suballoc.c 	unsigned int min_bits = le16_to_cpu(cl->cl_cpg) >> 1;
cl                586 fs/ocfs2/suballoc.c 	unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
cl                633 fs/ocfs2/suballoc.c 					bg_blkno, num_bits, alloc_rec, cl);
cl                640 fs/ocfs2/suballoc.c 						  bg_bh, ac, cl, min_bits);
cl                662 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl;
cl                671 fs/ocfs2/suballoc.c 	cl = &fe->id2.i_chain;
cl                673 fs/ocfs2/suballoc.c 						   le16_to_cpu(cl->cl_cpg),
cl                682 fs/ocfs2/suballoc.c 						 le16_to_cpu(cl->cl_cpg));
cl                698 fs/ocfs2/suballoc.c 					       ac, cl);
cl                702 fs/ocfs2/suballoc.c 							  ac, cl);
cl                720 fs/ocfs2/suballoc.c 	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
cl                722 fs/ocfs2/suballoc.c 	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
cl                724 fs/ocfs2/suballoc.c 	cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
cl                725 fs/ocfs2/suballoc.c 	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
cl                726 fs/ocfs2/suballoc.c 		le16_add_cpu(&cl->cl_next_free_rec, 1);
cl                731 fs/ocfs2/suballoc.c 	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
cl               1382 fs/ocfs2/suballoc.c static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
cl               1386 fs/ocfs2/suballoc.c 	BUG_ON(!cl->cl_next_free_rec);
cl               1389 fs/ocfs2/suballoc.c 	while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
cl               1390 fs/ocfs2/suballoc.c 		if (le32_to_cpu(cl->cl_recs[curr].c_free) >
cl               1391 fs/ocfs2/suballoc.c 		    le32_to_cpu(cl->cl_recs[best].c_free))
cl               1396 fs/ocfs2/suballoc.c 	BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
cl               1585 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
cl               1596 fs/ocfs2/suballoc.c 	le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
cl               1610 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl;
cl               1612 fs/ocfs2/suballoc.c 	cl = (struct ocfs2_chain_list *)&di->id2.i_chain;
cl               1615 fs/ocfs2/suballoc.c 	le32_add_cpu(&cl->cl_recs[chain].c_free, num_bits);
cl               1620 fs/ocfs2/suballoc.c 					 struct ocfs2_chain_list *cl)
cl               1622 fs/ocfs2/suballoc.c 	unsigned int bpc = le16_to_cpu(cl->cl_bpc);
cl               1645 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl = &di->id2.i_chain;
cl               1660 fs/ocfs2/suballoc.c 		if (ocfs2_bg_discontig_fix_by_rec(res, rec, cl)) {
cl               1748 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
cl               1757 fs/ocfs2/suballoc.c 					     le64_to_cpu(cl->cl_recs[chain].c_blkno),
cl               1885 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl;
cl               1924 fs/ocfs2/suballoc.c 	cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
cl               1926 fs/ocfs2/suballoc.c 	victim = ocfs2_find_victim_chain(cl);
cl               1950 fs/ocfs2/suballoc.c 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
cl               1953 fs/ocfs2/suballoc.c 		if (!cl->cl_recs[i].c_free)
cl               2457 fs/ocfs2/suballoc.c 	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
cl               2467 fs/ocfs2/suballoc.c 	BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
cl               2501 fs/ocfs2/suballoc.c 	le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
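
The cluster of hits at fs/ocfs2/suballoc.c:1382-1396 covers ocfs2_find_victim_chain() almost line for line; only the local declarations, their initialisation and the loop increment are elided. A plausible completion under that reading (the best = curr = 0 initialisation and the curr++ are inferred, not shown in the hits):

static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
{
	u16 curr, best;

	BUG_ON(!cl->cl_next_free_rec);

	/* scan the live chain records for the one with the most free bits */
	best = curr = 0;
	while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
		if (le32_to_cpu(cl->cl_recs[curr].c_free) >
		    le32_to_cpu(cl->cl_recs[best].c_free))
			best = curr;
		curr++;
	}

	BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
	return best;
}
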
cl                 14 include/acpi/pcc.h extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
cl                 18 include/acpi/pcc.h static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
cl                110 include/linux/ceph/mon_client.h extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
cl                 38 include/linux/clkdev.h void clkdev_add(struct clk_lookup *cl);
cl                 39 include/linux/clkdev.h void clkdev_drop(struct clk_lookup *cl);
cl                 19 include/linux/firmware/imx/dsp.h 	struct mbox_client cl;
cl                138 include/linux/hsi/hsi.h static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
cl                140 include/linux/hsi/hsi.h 	dev_set_drvdata(&cl->device, data);
cl                143 include/linux/hsi/hsi.h static inline void *hsi_client_drvdata(struct hsi_client *cl)
cl                145 include/linux/hsi/hsi.h 	return dev_get_drvdata(&cl->device);
cl                148 include/linux/hsi/hsi.h int hsi_register_port_event(struct hsi_client *cl,
cl                150 include/linux/hsi/hsi.h int hsi_unregister_port_event(struct hsi_client *cl);
cl                187 include/linux/hsi/hsi.h 	struct hsi_client	*cl;
cl                230 include/linux/hsi/hsi.h 	int				(*setup)(struct hsi_client *cl);
cl                231 include/linux/hsi/hsi.h 	int				(*flush)(struct hsi_client *cl);
cl                232 include/linux/hsi/hsi.h 	int				(*start_tx)(struct hsi_client *cl);
cl                233 include/linux/hsi/hsi.h 	int				(*stop_tx)(struct hsi_client *cl);
cl                234 include/linux/hsi/hsi.h 	int				(*release)(struct hsi_client *cl);
cl                240 include/linux/hsi/hsi.h #define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
cl                243 include/linux/hsi/hsi.h int hsi_claim_port(struct hsi_client *cl, unsigned int share);
cl                244 include/linux/hsi/hsi.h void hsi_release_port(struct hsi_client *cl);
cl                246 include/linux/hsi/hsi.h static inline int hsi_port_claimed(struct hsi_client *cl)
cl                248 include/linux/hsi/hsi.h 	return cl->pclaimed;
cl                319 include/linux/hsi/hsi.h int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
cl                321 include/linux/hsi/hsi.h int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name);
cl                329 include/linux/hsi/hsi.h static inline unsigned int hsi_id(struct hsi_client *cl)
cl                331 include/linux/hsi/hsi.h 	return	to_hsi_controller(cl->device.parent->parent)->id;
cl                340 include/linux/hsi/hsi.h static inline unsigned int hsi_port_id(struct hsi_client *cl)
cl                342 include/linux/hsi/hsi.h 	return	to_hsi_port(cl->device.parent)->num;
cl                354 include/linux/hsi/hsi.h static inline int hsi_setup(struct hsi_client *cl)
cl                356 include/linux/hsi/hsi.h 	if (!hsi_port_claimed(cl))
cl                358 include/linux/hsi/hsi.h 	return	hsi_get_port(cl)->setup(cl);
cl                370 include/linux/hsi/hsi.h static inline int hsi_flush(struct hsi_client *cl)
cl                372 include/linux/hsi/hsi.h 	if (!hsi_port_claimed(cl))
cl                374 include/linux/hsi/hsi.h 	return hsi_get_port(cl)->flush(cl);
cl                384 include/linux/hsi/hsi.h static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
cl                387 include/linux/hsi/hsi.h 	return hsi_async(cl, msg);
cl                397 include/linux/hsi/hsi.h static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
cl                400 include/linux/hsi/hsi.h 	return hsi_async(cl, msg);
cl                409 include/linux/hsi/hsi.h static inline int hsi_start_tx(struct hsi_client *cl)
cl                411 include/linux/hsi/hsi.h 	if (!hsi_port_claimed(cl))
cl                413 include/linux/hsi/hsi.h 	return hsi_get_port(cl)->start_tx(cl);
cl                422 include/linux/hsi/hsi.h static inline int hsi_stop_tx(struct hsi_client *cl)
cl                424 include/linux/hsi/hsi.h 	if (!hsi_port_claimed(cl))
cl                426 include/linux/hsi/hsi.h 	return hsi_get_port(cl)->stop_tx(cl);
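
The hsi.h hits above spell out the client-side contract: hsi_setup(), hsi_flush(), hsi_start_tx() and hsi_stop_tx() all refuse to run unless the port was first claimed via hsi_claim_port(). A minimal bring-up sketch using only the calls listed above (the error value returned by the unclaimed-port check is elided in the hits; everything named demo_* is hypothetical):

static int demo_hsi_bringup(struct hsi_client *cl)
{
	int err;

	err = hsi_claim_port(cl, 0);	/* 0 = exclusive claim */
	if (err < 0)
		return err;

	err = hsi_setup(cl);		/* legal only while claimed */
	if (err < 0)
		goto release;

	err = hsi_start_tx(cl);		/* ask the port to enable TX */
	if (err < 0)
		goto release;

	return 0;

release:
	hsi_release_port(cl);
	return err;
}
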
cl                 64 include/linux/intel-ish-client-if.h 	struct ishtp_cl *cl;
cl                 84 include/linux/intel-ish-client-if.h void ishtp_cl_free(struct ishtp_cl *cl);
cl                 85 include/linux/intel-ish-client-if.h int ishtp_cl_link(struct ishtp_cl *cl);
cl                 86 include/linux/intel-ish-client-if.h void ishtp_cl_unlink(struct ishtp_cl *cl);
cl                 87 include/linux/intel-ish-client-if.h int ishtp_cl_disconnect(struct ishtp_cl *cl);
cl                 88 include/linux/intel-ish-client-if.h int ishtp_cl_connect(struct ishtp_cl *cl);
cl                 89 include/linux/intel-ish-client-if.h int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
cl                 90 include/linux/intel-ish-client-if.h int ishtp_cl_flush_queues(struct ishtp_cl *cl);
cl                 92 include/linux/intel-ish-client-if.h bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
cl                 93 include/linux/intel-ish-client-if.h struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
cl                 94 include/linux/intel-ish-client-if.h void *ishtp_get_client_data(struct ishtp_cl *cl);
cl                 95 include/linux/intel-ish-client-if.h void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
cl                 96 include/linux/intel-ish-client-if.h struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
cl                 97 include/linux/intel-ish-client-if.h void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
cl                 98 include/linux/intel-ish-client-if.h void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
cl                 99 include/linux/intel-ish-client-if.h void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
cl                100 include/linux/intel-ish-client-if.h void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
cl                 77 include/linux/mailbox/mtk-cmdq-mailbox.h 	void			*cl;
cl                 35 include/linux/mailbox_client.h 	void (*rx_callback)(struct mbox_client *cl, void *mssg);
cl                 36 include/linux/mailbox_client.h 	void (*tx_prepare)(struct mbox_client *cl, void *mssg);
cl                 37 include/linux/mailbox_client.h 	void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
cl                 40 include/linux/mailbox_client.h struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
cl                 42 include/linux/mailbox_client.h struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
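
mailbox_client.h above shows the three client callbacks and the two channel-request entry points. A sketch of the usual pattern under those declarations (mbox_send_message() and the dev/tx_block fields are assumed from the same header; the demo_* names are invented):

static void demo_rx(struct mbox_client *cl, void *mssg)
{
	/* called by the framework when the remote delivers a message */
}

static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	/* r < 0 reports a transmit failure for mssg */
}

static struct mbox_chan *demo_open_channel(struct device *dev)
{
	static struct mbox_client cl;

	cl.dev		= dev;
	cl.rx_callback	= demo_rx;
	cl.tx_done	= demo_tx_done;
	cl.tx_block	= true;	/* sending then waits for completion */

	return mbox_request_channel(&cl, 0);	/* index into "mboxes" */
}
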
cl                120 include/linux/mailbox_controller.h 	struct mbox_client *cl;
cl                 47 include/linux/mei_cl_bus.h 	struct mei_cl *cl;
cl                 20 include/linux/omap-mailbox.h struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
cl                974 include/linux/usb.h #define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \
cl                979 include/linux/usb.h 	.bInterfaceClass = (cl)
cl               1022 include/linux/usb.h #define USB_DEVICE_INFO(cl, sc, pr) \
cl               1024 include/linux/usb.h 	.bDeviceClass = (cl), \
cl               1037 include/linux/usb.h #define USB_INTERFACE_INFO(cl, sc, pr) \
cl               1039 include/linux/usb.h 	.bInterfaceClass = (cl), \
cl               1057 include/linux/usb.h #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
cl               1062 include/linux/usb.h 	.bInterfaceClass = (cl), \
cl               1079 include/linux/usb.h #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
cl               1083 include/linux/usb.h 	.bInterfaceClass = (cl), \
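
Every usb.h macro above expands to initialisers for struct usb_device_id match fields; the cl/sc/pr arguments land in the bDeviceClass or bInterfaceClass triples. A hypothetical match table mixing two of them (the 0x1234/0x5678 IDs are placeholders):

static const struct usb_device_id demo_id_table[] = {
	/* any interface reporting HID class, boot subclass, keyboard protocol */
	{ USB_INTERFACE_INFO(USB_CLASS_HID, 1, 1) },
	/* one specific device, additionally matched on interface class */
	{ USB_DEVICE_INTERFACE_CLASS(0x1234, 0x5678, USB_CLASS_VENDOR_SPEC) },
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, demo_id_table);
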
cl                139 include/net/pkt_cls.h __cls_set_class(unsigned long *clp, unsigned long cl)
cl                141 include/net/pkt_cls.h 	return xchg(clp, cl);
cl                147 include/net/pkt_cls.h 	unsigned long cl;
cl                149 include/net/pkt_cls.h 	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
cl                150 include/net/pkt_cls.h 	cl = __cls_set_class(&r->class, cl);
cl                151 include/net/pkt_cls.h 	if (cl)
cl                152 include/net/pkt_cls.h 		q->ops->cl_ops->unbind_tcf(q, cl);
cl                173 include/net/pkt_cls.h 	unsigned long cl;
cl                175 include/net/pkt_cls.h 	if ((cl = __cls_set_class(&r->class, 0)) != 0)
cl                176 include/net/pkt_cls.h 		q->ops->cl_ops->unbind_tcf(q, cl);
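
The pkt_cls.h hits at 139-152 and 173-176 form the bind/unbind pair built on the xchg() in __cls_set_class(): binding installs the qdisc's class reference and drops whatever was bound before, unbinding swaps in 0. A reconstruction of the two inline helpers (their names and the void return type are inferred, not shown in the hits):

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
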
cl                 19 include/net/pkt_sched.h 	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
cl                199 include/net/sch_generic.h 	int			(*graft)(struct Qdisc *, unsigned long cl,
cl                202 include/net/sch_generic.h 	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
cl                603 include/net/sch_generic.h 	struct Qdisc_class_common *cl;
cl                610 include/net/sch_generic.h 	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
cl                611 include/net/sch_generic.h 		if (cl->classid == id)
cl                612 include/net/sch_generic.h 			return cl;
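
Lines 603-612 of sch_generic.h above are the classid hash lookup minus its signature, the bucket computation and the miss path. A plausible completion (the qdisc_class_find name and the qdisc_class_hash() call are assumptions consistent with the sch_api.c hits further down):

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;	/* hit */
	}
	return NULL;			/* no such class */
}
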
cl                336 include/soc/fsl/qman.h 	u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
cl                462 include/soc/fsl/qman.h 	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
cl                469 include/soc/fsl/qman.h 	return fqd->context_a.stashing.cl;
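
The qman.h comment at line 336 gives the layout of the packed stashing byte, and line 462 shows it being assembled from shifted 2-bit fields. Hypothetical accessors inverting that packing (names invented; only the field widths come from the comment):

/* cl layout: _res[6-7], as[4-5], ds[2-3], cs[0-1] -- 2 bits each */
static inline u8 demo_stashing_as(u8 cl) { return (cl >> 4) & 0x3; }
static inline u8 demo_stashing_ds(u8 cl) { return (cl >> 2) & 0x3; }
static inline u8 demo_stashing_cs(u8 cl) { return cl & 0x3; }
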
cl                154 include/soc/nps/common.h 	u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
cl                157 include/soc/nps/common.h 	reg_address.cl_x  = (cl >> 2) & 0x3;
cl                158 include/soc/nps/common.h 	reg_address.cl_y  = cl & 0x3;
cl                927 net/9p/trans_fd.c 	struct sockaddr_in cl;
cl                930 net/9p/trans_fd.c 	memset(&cl, 0, sizeof(cl));
cl                931 net/9p/trans_fd.c 	cl.sin_family = AF_INET;
cl                932 net/9p/trans_fd.c 	cl.sin_addr.s_addr = INADDR_ANY;
cl                934 net/9p/trans_fd.c 		cl.sin_port = htons((ushort)port);
cl                935 net/9p/trans_fd.c 		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
cl                604 net/9p/trans_rdma.c 	struct sockaddr_in cl = {
cl                611 net/9p/trans_rdma.c 		cl.sin_port = htons((ushort)port);
cl                612 net/9p/trans_rdma.c 		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
cl                 92 net/bluetooth/a2mp.c static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
cl                 97 net/bluetooth/a2mp.c 	cl[0].id = AMP_ID_BREDR;
cl                 98 net/bluetooth/a2mp.c 	cl[0].type = AMP_TYPE_BREDR;
cl                 99 net/bluetooth/a2mp.c 	cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
cl                103 net/bluetooth/a2mp.c 			cl[i].id = hdev->id;
cl                104 net/bluetooth/a2mp.c 			cl[i].type = hdev->amp_type;
cl                106 net/bluetooth/a2mp.c 				cl[i].status = hdev->amp_status;
cl                108 net/bluetooth/a2mp.c 				cl[i].status = AMP_STATUS_POWERED_DOWN;
cl                170 net/bluetooth/a2mp.c 	len = struct_size(rsp, cl, num_ctrl);
cl                180 net/bluetooth/a2mp.c 	__a2mp_add_cl(mgr, rsp->cl);
cl                195 net/bluetooth/a2mp.c 	struct a2mp_cl *cl;
cl                220 net/bluetooth/a2mp.c 	cl = (void *) skb->data;
cl                221 net/bluetooth/a2mp.c 	while (len >= sizeof(*cl)) {
cl                222 net/bluetooth/a2mp.c 		BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
cl                223 net/bluetooth/a2mp.c 		       cl->status);
cl                225 net/bluetooth/a2mp.c 		if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
cl                229 net/bluetooth/a2mp.c 			req.id = cl->id;
cl                234 net/bluetooth/a2mp.c 		len -= sizeof(*cl);
cl                235 net/bluetooth/a2mp.c 		cl = skb_pull(skb, sizeof(*cl));
cl                270 net/bluetooth/a2mp.c 	struct a2mp_cl *cl = (void *) skb->data;
cl                272 net/bluetooth/a2mp.c 	while (skb->len >= sizeof(*cl)) {
cl                273 net/bluetooth/a2mp.c 		BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
cl                274 net/bluetooth/a2mp.c 		       cl->status);
cl                275 net/bluetooth/a2mp.c 		cl = skb_pull(skb, sizeof(*cl));
cl                 65 net/bluetooth/a2mp.h 	struct a2mp_cl cl[0];
cl                 54 net/bluetooth/bnep/sock.c 	struct bnep_connlist_req cl;
cl                101 net/bluetooth/bnep/sock.c 		if (copy_from_user(&cl, argp, sizeof(cl)))
cl                104 net/bluetooth/bnep/sock.c 		if (cl.cnum <= 0)
cl                107 net/bluetooth/bnep/sock.c 		err = bnep_get_connlist(&cl);
cl                108 net/bluetooth/bnep/sock.c 		if (!err && copy_to_user(argp, &cl, sizeof(cl)))
cl                146 net/bluetooth/bnep/sock.c 		struct bnep_connlist_req cl;
cl                151 net/bluetooth/bnep/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
cl                154 net/bluetooth/bnep/sock.c 		cl.ci = compat_ptr(uci);
cl                156 net/bluetooth/bnep/sock.c 		if (cl.cnum <= 0)
cl                159 net/bluetooth/bnep/sock.c 		err = bnep_get_connlist(&cl);
cl                161 net/bluetooth/bnep/sock.c 		if (!err && put_user(cl.cnum, p))
cl                 70 net/bluetooth/cmtp/sock.c 	struct cmtp_connlist_req cl;
cl                113 net/bluetooth/cmtp/sock.c 		if (copy_from_user(&cl, argp, sizeof(cl)))
cl                116 net/bluetooth/cmtp/sock.c 		if (cl.cnum <= 0)
cl                119 net/bluetooth/cmtp/sock.c 		err = cmtp_get_connlist(&cl);
cl                120 net/bluetooth/cmtp/sock.c 		if (!err && copy_to_user(argp, &cl, sizeof(cl)))
cl                149 net/bluetooth/cmtp/sock.c 		struct cmtp_connlist_req cl;
cl                154 net/bluetooth/cmtp/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
cl                157 net/bluetooth/cmtp/sock.c 		cl.ci = compat_ptr(uci);
cl                159 net/bluetooth/cmtp/sock.c 		if (cl.cnum <= 0)
cl                162 net/bluetooth/cmtp/sock.c 		err = cmtp_get_connlist(&cl);
cl                164 net/bluetooth/cmtp/sock.c 		if (!err && put_user(cl.cnum, p))
cl               1544 net/bluetooth/hci_conn.c 	struct hci_conn_list_req req, *cl;
cl               1557 net/bluetooth/hci_conn.c 	cl = kmalloc(size, GFP_KERNEL);
cl               1558 net/bluetooth/hci_conn.c 	if (!cl)
cl               1563 net/bluetooth/hci_conn.c 		kfree(cl);
cl               1567 net/bluetooth/hci_conn.c 	ci = cl->conn_info;
cl               1582 net/bluetooth/hci_conn.c 	cl->dev_id = hdev->id;
cl               1583 net/bluetooth/hci_conn.c 	cl->conn_num = n;
cl               1588 net/bluetooth/hci_conn.c 	err = copy_to_user(arg, cl, size);
cl               1589 net/bluetooth/hci_conn.c 	kfree(cl);
cl                 53 net/bluetooth/hidp/sock.c 	struct hidp_connlist_req cl;
cl                 99 net/bluetooth/hidp/sock.c 		if (copy_from_user(&cl, argp, sizeof(cl)))
cl                102 net/bluetooth/hidp/sock.c 		if (cl.cnum <= 0)
cl                105 net/bluetooth/hidp/sock.c 		err = hidp_get_connlist(&cl);
cl                106 net/bluetooth/hidp/sock.c 		if (!err && copy_to_user(argp, &cl, sizeof(cl)))
cl                153 net/bluetooth/hidp/sock.c 		struct hidp_connlist_req cl;
cl                157 net/bluetooth/hidp/sock.c 		if (get_user(cl.cnum, p) || get_user(uci, p + 1))
cl                160 net/bluetooth/hidp/sock.c 		cl.ci = compat_ptr(uci);
cl                162 net/bluetooth/hidp/sock.c 		if (cl.cnum <= 0)
cl                165 net/bluetooth/hidp/sock.c 		err = hidp_get_connlist(&cl);
cl                167 net/bluetooth/hidp/sock.c 		if (!err && put_user(cl.cnum, p))
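
The bnep, cmtp and hidp socket hits above all repeat one GETCONNLIST ioctl shape: copy the request in, sanity-check cnum, fill the list, copy the header back. Condensed into a single sketch using the bnep names (the -EFAULT/-EINVAL returns are elided in the hits and assumed; demo_get_connlist is an invented wrapper):

static int demo_get_connlist(void __user *argp)
{
	struct bnep_connlist_req cl;
	int err;

	if (copy_from_user(&cl, argp, sizeof(cl)))
		return -EFAULT;
	if (cl.cnum <= 0)
		return -EINVAL;

	err = bnep_get_connlist(&cl);	/* fills cl.ci[], may lower cl.cnum */
	if (!err && copy_to_user(argp, &cl, sizeof(cl)))
		err = -EFAULT;

	return err;
}
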
cl               1039 net/ceph/mon_client.c int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
cl               1045 net/ceph/mon_client.c 	monc->client = cl;
cl               1055 net/ceph/mon_client.c 	monc->auth = ceph_auth_init(cl->options->name,
cl               1056 net/ceph/mon_client.c 				    cl->options->key);
cl                264 net/core/flow_dissector.c 	struct nf_conn_labels *cl;
cl                287 net/core/flow_dissector.c 	cl = nf_ct_labels_find(ct);
cl                288 net/core/flow_dissector.c 	if (cl)
cl                289 net/core/flow_dissector.c 		memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
cl                 61 net/netfilter/ipvs/ip_vs_wrr.c 	struct ip_vs_dest *cl;	/* current dest or head */
cl                117 net/netfilter/ipvs/ip_vs_wrr.c 	mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
cl                144 net/netfilter/ipvs/ip_vs_wrr.c 	mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
cl                170 net/netfilter/ipvs/ip_vs_wrr.c 	dest = mark->cl;
cl                218 net/netfilter/ipvs/ip_vs_wrr.c 	mark->cl = dest;
cl                225 net/netfilter/ipvs/ip_vs_wrr.c 	mark->cl = dest;
cl                231 net/netfilter/ipvs/ip_vs_wrr.c 	mark->cl = dest;
cl                167 net/openvswitch/conntrack.c 	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
cl                169 net/openvswitch/conntrack.c 	if (cl)
cl                170 net/openvswitch/conntrack.c 		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
cl                353 net/openvswitch/conntrack.c 	struct nf_conn_labels *cl;
cl                355 net/openvswitch/conntrack.c 	cl = nf_ct_labels_find(ct);
cl                356 net/openvswitch/conntrack.c 	if (!cl) {
cl                358 net/openvswitch/conntrack.c 		cl = nf_ct_labels_find(ct);
cl                361 net/openvswitch/conntrack.c 	return cl;
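
Lines 353-361 above are ovs_ct_get_conn_labels() in full except for the call at line 358 that attaches the labels extension before the retry; nf_ct_labels_ext_add() is the natural candidate for that gap and is assumed here:

static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);	/* assumed: attach ext, retry */
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}
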
cl                372 net/openvswitch/conntrack.c 	struct nf_conn_labels *cl, *master_cl;
cl                381 net/openvswitch/conntrack.c 	cl = ovs_ct_get_conn_labels(ct);
cl                382 net/openvswitch/conntrack.c 	if (!cl)
cl                387 net/openvswitch/conntrack.c 		*cl = *master_cl;
cl                390 net/openvswitch/conntrack.c 		u32 *dst = (u32 *)cl->bits;
cl                404 net/openvswitch/conntrack.c 	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
cl                413 net/openvswitch/conntrack.c 	struct nf_conn_labels *cl;
cl                416 net/openvswitch/conntrack.c 	cl = ovs_ct_get_conn_labels(ct);
cl                417 net/openvswitch/conntrack.c 	if (!cl)
cl                426 net/openvswitch/conntrack.c 	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
cl               1227 net/openvswitch/flow_netlink.c 		const struct ovs_key_ct_labels *cl;
cl               1229 net/openvswitch/flow_netlink.c 		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
cl               1230 net/openvswitch/flow_netlink.c 		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
cl               1231 net/openvswitch/flow_netlink.c 				   sizeof(*cl), is_mask);
cl               1135 net/sched/cls_api.c static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
cl               1145 net/sched/cls_api.c 		*cl = cops->find(q, parent);
cl               1146 net/sched/cls_api.c 		if (*cl == 0) {
cl               1156 net/sched/cls_api.c 					  unsigned long cl, int ifindex,
cl               1171 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, extack);
cl               1230 net/sched/cls_api.c 					u32 *parent, unsigned long *cl,
cl               1243 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
cl               1247 net/sched/cls_api.c 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
cl               1936 net/sched/cls_api.c 	unsigned long cl;
cl               1959 net/sched/cls_api.c 	cl = 0;
cl               1998 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
cl               2002 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
cl               2104 net/sched/cls_api.c 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
cl               2162 net/sched/cls_api.c 	unsigned long cl = 0;
cl               2207 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
cl               2211 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
cl               2322 net/sched/cls_api.c 	unsigned long cl = 0;
cl               2363 net/sched/cls_api.c 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
cl               2367 net/sched/cls_api.c 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
cl               2544 net/sched/cls_api.c 		unsigned long cl = 0;
cl               2563 net/sched/cls_api.c 			cl = cops->find(q, tcm->tcm_parent);
cl               2564 net/sched/cls_api.c 			if (cl == 0)
cl               2567 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, NULL);
cl               2770 net/sched/cls_api.c 	unsigned long cl;
cl               2785 net/sched/cls_api.c 	cl = 0;
cl               2787 net/sched/cls_api.c 	block = tcf_block_find(net, &q, &parent, &cl,
cl               2929 net/sched/cls_api.c 		unsigned long cl = 0;
cl               2950 net/sched/cls_api.c 			cl = cops->find(q, tcm->tcm_parent);
cl               2951 net/sched/cls_api.c 			if (cl == 0)
cl               2954 net/sched/cls_api.c 		block = cops->tcf_block(q, cl, NULL);
cl                266 net/sched/cls_basic.c static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl                272 net/sched/cls_basic.c 		if (cl)
cl                634 net/sched/cls_bpf.c static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
cl                640 net/sched/cls_bpf.c 		if (cl)
cl               2516 net/sched/cls_flower.c static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl               2522 net/sched/cls_flower.c 		if (cl)
cl                422 net/sched/cls_fw.c static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl                428 net/sched/cls_fw.c 		if (cl)
cl                397 net/sched/cls_matchall.c static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl                403 net/sched/cls_matchall.c 		if (cl)
cl                644 net/sched/cls_route.c static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl                650 net/sched/cls_route.c 		if (cl)
cl                739 net/sched/cls_rsvp.h static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl                745 net/sched/cls_rsvp.h 		if (cl)
cl                692 net/sched/cls_tcindex.c static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
cl                698 net/sched/cls_tcindex.c 		if (cl)
cl               1258 net/sched/cls_u32.c static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
cl               1264 net/sched/cls_u32.c 		if (cl)
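
Nine classifiers above (basic, bpf, flower, fw, matchall, route, rsvp, tcindex, u32) implement the same bind_class hook; only the filter type changes. A reconstruction of the basic variant, reusing the __tcf_bind_filter()/__tcf_unbind_filter() helpers sketched after the pkt_cls.h hits (the classid comparison is inferred from the pattern, not shown):

static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			     unsigned long base)
{
	struct basic_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}
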
cl                333 net/sched/sch_api.c 	unsigned long cl;
cl                338 net/sched/sch_api.c 	cl = cops->find(p, classid);
cl                340 net/sched/sch_api.c 	if (cl == 0)
cl                342 net/sched/sch_api.c 	return cops->leaf(p, cl);
cl                659 net/sched/sch_api.c 	struct Qdisc_class_common *cl;
cl                679 net/sched/sch_api.c 		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
cl                680 net/sched/sch_api.c 			h = qdisc_class_hash(cl->classid, nmask);
cl                681 net/sched/sch_api.c 			hlist_add_head(&cl->hnode, &nhash[h]);
cl                714 net/sched/sch_api.c 			     struct Qdisc_class_common *cl)
cl                718 net/sched/sch_api.c 	INIT_HLIST_NODE(&cl->hnode);
cl                719 net/sched/sch_api.c 	h = qdisc_class_hash(cl->classid, clhash->hashmask);
cl                720 net/sched/sch_api.c 	hlist_add_head(&cl->hnode, &clhash->hash[h]);
cl                726 net/sched/sch_api.c 			     struct Qdisc_class_common *cl)
cl                728 net/sched/sch_api.c 	hlist_del(&cl->hnode);
cl                757 net/sched/sch_api.c 	unsigned long cl;
cl                791 net/sched/sch_api.c 			cl = cops->find(sch, parentid);
cl                792 net/sched/sch_api.c 			cops->qlen_notify(sch, cl);
cl               1083 net/sched/sch_api.c 		unsigned long cl;
cl               1094 net/sched/sch_api.c 		cl = cops->find(parent, classid);
cl               1095 net/sched/sch_api.c 		if (!cl) {
cl               1100 net/sched/sch_api.c 		err = cops->graft(parent, cl, new, &old, extack);
cl               1358 net/sched/sch_api.c static int check_loop_fn(struct Qdisc *q, unsigned long cl,
cl               1377 net/sched/sch_api.c check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
cl               1383 net/sched/sch_api.c 	leaf = cops->leaf(q, cl);
cl               1786 net/sched/sch_api.c 			  unsigned long cl,
cl               1809 net/sched/sch_api.c 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
cl               1816 net/sched/sch_api.c 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
cl               1833 net/sched/sch_api.c 			 unsigned long cl, int event)
cl               1843 net/sched/sch_api.c 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
cl               1858 net/sched/sch_api.c 			     struct Qdisc *q, unsigned long cl)
cl               1871 net/sched/sch_api.c 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
cl               1877 net/sched/sch_api.c 	err = cops->delete(q, cl);
cl               1895 net/sched/sch_api.c 	unsigned long cl;
cl               1907 net/sched/sch_api.c 		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
cl               1920 net/sched/sch_api.c static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
cl               1928 net/sched/sch_api.c 	block = cops->tcf_block(q, cl, NULL);
cl               1942 net/sched/sch_api.c 			arg.base = cl;
cl               1943 net/sched/sch_api.c 			arg.cl = a->new_cl;
cl               1984 net/sched/sch_api.c 	unsigned long cl = 0;
cl               2065 net/sched/sch_api.c 		cl = cops->find(q, clid);
cl               2067 net/sched/sch_api.c 	if (cl == 0) {
cl               2080 net/sched/sch_api.c 			err = tclass_del_notify(net, cops, skb, n, q, cl);
cl               2085 net/sched/sch_api.c 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
cl               2098 net/sched/sch_api.c 	new_cl = cl;
cl               2105 net/sched/sch_api.c 		if (cl != new_cl)
cl               2118 net/sched/sch_api.c static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
cl               2123 net/sched/sch_api.c 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
cl                105 net/sched/sch_atm.c static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
cl                107 net/sched/sch_atm.c 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
cl                143 net/sched/sch_atm.c static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
cl                146 net/sched/sch_atm.c 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
cl                271 net/sched/sch_atm.c 		unsigned long cl;
cl                275 net/sched/sch_atm.c 			cl = atm_tc_find(sch, classid);
cl                276 net/sched/sch_atm.c 			if (!cl)
cl                365 net/sched/sch_atm.c static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                369 net/sched/sch_atm.c 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
cl                600 net/sched/sch_atm.c static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
cl                604 net/sched/sch_atm.c 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
cl               2924 net/sched/sch_cake.c static void cake_unbind(struct Qdisc *q, unsigned long cl)
cl               2928 net/sched/sch_cake.c static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
cl               2933 net/sched/sch_cake.c 	if (cl)
cl               2938 net/sched/sch_cake.c static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
cl               2941 net/sched/sch_cake.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl               2945 net/sched/sch_cake.c static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl               2952 net/sched/sch_cake.c 	u32 idx = cl - 1;
cl                163 net/sched/sch_cbq.c #define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
cl                181 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl                183 net/sched/sch_cbq.c 	for (cl = this->tparent; cl; cl = cl->tparent) {
cl                184 net/sched/sch_cbq.c 		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
cl                210 net/sched/sch_cbq.c 	struct cbq_class *cl = NULL;
cl                219 net/sched/sch_cbq.c 	    (cl = cbq_class_lookup(q, prio)) != NULL)
cl                220 net/sched/sch_cbq.c 		return cl;
cl                235 net/sched/sch_cbq.c 		cl = (void *)res.class;
cl                236 net/sched/sch_cbq.c 		if (!cl) {
cl                238 net/sched/sch_cbq.c 				cl = cbq_class_lookup(q, res.classid);
cl                239 net/sched/sch_cbq.c 			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl                240 net/sched/sch_cbq.c 				cl = defmap[TC_PRIO_BESTEFFORT];
cl                242 net/sched/sch_cbq.c 			if (cl == NULL)
cl                245 net/sched/sch_cbq.c 		if (cl->level >= head->level)
cl                257 net/sched/sch_cbq.c 			return cbq_reclassify(skb, cl);
cl                260 net/sched/sch_cbq.c 		if (cl->level == 0)
cl                261 net/sched/sch_cbq.c 			return cl;
cl                268 net/sched/sch_cbq.c 		head = cl;
cl                272 net/sched/sch_cbq.c 	cl = head;
cl                278 net/sched/sch_cbq.c 	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
cl                279 net/sched/sch_cbq.c 	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
cl                282 net/sched/sch_cbq.c 	return cl;
cl                291 net/sched/sch_cbq.c static inline void cbq_activate_class(struct cbq_class *cl)
cl                293 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl                294 net/sched/sch_cbq.c 	int prio = cl->cpriority;
cl                298 net/sched/sch_cbq.c 	q->active[prio] = cl;
cl                301 net/sched/sch_cbq.c 		cl->next_alive = cl_tail->next_alive;
cl                302 net/sched/sch_cbq.c 		cl_tail->next_alive = cl;
cl                304 net/sched/sch_cbq.c 		cl->next_alive = cl;
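
The cbq_activate_class() hits at 291-304 show the splice into the per-priority circular list of alive classes; the cl_tail local and the empty-list bookkeeping are elided. A plausible completion (the activemask update is an assumption consistent with the priority bitmask handling elsewhere in the file):

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		/* splice behind the current tail of the circular list */
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		/* first alive class at this priority: self-link it */
		cl->next_alive = cl;
		q->activemask |= (1 << prio);	/* assumed bookkeeping */
	}
}
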
cl                319 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl                323 net/sched/sch_cbq.c 		cl = cl_prev->next_alive;
cl                324 net/sched/sch_cbq.c 		if (cl == this) {
cl                325 net/sched/sch_cbq.c 			cl_prev->next_alive = cl->next_alive;
cl                326 net/sched/sch_cbq.c 			cl->next_alive = NULL;
cl                328 net/sched/sch_cbq.c 			if (cl == q->active[prio]) {
cl                330 net/sched/sch_cbq.c 				if (cl == q->active[prio]) {
cl                338 net/sched/sch_cbq.c 	} while ((cl_prev = cl) != q->active[prio]);
cl                342 net/sched/sch_cbq.c cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
cl                346 net/sched/sch_cbq.c 	if (toplevel > cl->level) {
cl                350 net/sched/sch_cbq.c 			if (cl->undertime < now) {
cl                351 net/sched/sch_cbq.c 				q->toplevel = cl->level;
cl                354 net/sched/sch_cbq.c 		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
cl                364 net/sched/sch_cbq.c 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
cl                367 net/sched/sch_cbq.c 	q->rx_class = cl;
cl                369 net/sched/sch_cbq.c 	if (cl == NULL) {
cl                376 net/sched/sch_cbq.c 	ret = qdisc_enqueue(skb, cl->q, to_free);
cl                379 net/sched/sch_cbq.c 		cbq_mark_toplevel(q, cl);
cl                380 net/sched/sch_cbq.c 		if (!cl->next_alive)
cl                381 net/sched/sch_cbq.c 			cbq_activate_class(cl);
cl                387 net/sched/sch_cbq.c 		cbq_mark_toplevel(q, cl);
cl                388 net/sched/sch_cbq.c 		cl->qstats.drops++;
cl                394 net/sched/sch_cbq.c static void cbq_overlimit(struct cbq_class *cl)
cl                396 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl                397 net/sched/sch_cbq.c 	psched_tdiff_t delay = cl->undertime - q->now;
cl                399 net/sched/sch_cbq.c 	if (!cl->delayed) {
cl                400 net/sched/sch_cbq.c 		delay += cl->offtime;
cl                409 net/sched/sch_cbq.c 		if (cl->avgidle < 0)
cl                410 net/sched/sch_cbq.c 			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
cl                411 net/sched/sch_cbq.c 		if (cl->avgidle < cl->minidle)
cl                412 net/sched/sch_cbq.c 			cl->avgidle = cl->minidle;
cl                415 net/sched/sch_cbq.c 		cl->undertime = q->now + delay;
cl                417 net/sched/sch_cbq.c 		cl->xstats.overactions++;
cl                418 net/sched/sch_cbq.c 		cl->delayed = 1;
cl                431 net/sched/sch_cbq.c 		for (b = cl->borrow; b; b = b->borrow) {
cl                447 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl                455 net/sched/sch_cbq.c 		cl = cl_prev->next_alive;
cl                456 net/sched/sch_cbq.c 		if (now - cl->penalized > 0) {
cl                457 net/sched/sch_cbq.c 			cl_prev->next_alive = cl->next_alive;
cl                458 net/sched/sch_cbq.c 			cl->next_alive = NULL;
cl                459 net/sched/sch_cbq.c 			cl->cpriority = cl->priority;
cl                460 net/sched/sch_cbq.c 			cl->delayed = 0;
cl                461 net/sched/sch_cbq.c 			cbq_activate_class(cl);
cl                463 net/sched/sch_cbq.c 			if (cl == q->active[prio]) {
cl                465 net/sched/sch_cbq.c 				if (cl == q->active[prio]) {
cl                471 net/sched/sch_cbq.c 			cl = cl_prev->next_alive;
cl                472 net/sched/sch_cbq.c 		} else if (sched - cl->penalized > 0)
cl                473 net/sched/sch_cbq.c 			sched = cl->penalized;
cl                474 net/sched/sch_cbq.c 	} while ((cl_prev = cl) != q->active[prio]);
cl                529 net/sched/sch_cbq.c cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
cl                532 net/sched/sch_cbq.c 	if (cl && q->toplevel >= borrowed->level) {
cl                533 net/sched/sch_cbq.c 		if (cl->q->q.qlen > 1) {
cl                554 net/sched/sch_cbq.c 	struct cbq_class *cl = this;
cl                564 net/sched/sch_cbq.c 	for ( ; cl; cl = cl->share) {
cl                565 net/sched/sch_cbq.c 		long avgidle = cl->avgidle;
cl                568 net/sched/sch_cbq.c 		cl->bstats.packets++;
cl                569 net/sched/sch_cbq.c 		cl->bstats.bytes += len;
cl                578 net/sched/sch_cbq.c 		idle = now - cl->last;
cl                580 net/sched/sch_cbq.c 			avgidle = cl->maxidle;
cl                582 net/sched/sch_cbq.c 			idle -= L2T(cl, len);
cl                589 net/sched/sch_cbq.c 			avgidle += idle - (avgidle>>cl->ewma_log);
cl                595 net/sched/sch_cbq.c 			if (avgidle < cl->minidle)
cl                596 net/sched/sch_cbq.c 				avgidle = cl->minidle;
cl                598 net/sched/sch_cbq.c 			cl->avgidle = avgidle;
cl                608 net/sched/sch_cbq.c 			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
cl                620 net/sched/sch_cbq.c 			idle += L2T(cl, len);
cl                622 net/sched/sch_cbq.c 			cl->undertime = now + idle;
cl                626 net/sched/sch_cbq.c 			cl->undertime = PSCHED_PASTPERFECT;
cl                627 net/sched/sch_cbq.c 			if (avgidle > cl->maxidle)
cl                628 net/sched/sch_cbq.c 				cl->avgidle = cl->maxidle;
cl                630 net/sched/sch_cbq.c 				cl->avgidle = avgidle;
cl                632 net/sched/sch_cbq.c 		if ((s64)(now - cl->last) > 0)
cl                633 net/sched/sch_cbq.c 			cl->last = now;
cl                640 net/sched/sch_cbq.c cbq_under_limit(struct cbq_class *cl)
cl                642 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl                643 net/sched/sch_cbq.c 	struct cbq_class *this_cl = cl;
cl                645 net/sched/sch_cbq.c 	if (cl->tparent == NULL)
cl                646 net/sched/sch_cbq.c 		return cl;
cl                648 net/sched/sch_cbq.c 	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
cl                649 net/sched/sch_cbq.c 		cl->delayed = 0;
cl                650 net/sched/sch_cbq.c 		return cl;
cl                664 net/sched/sch_cbq.c 		cl = cl->borrow;
cl                665 net/sched/sch_cbq.c 		if (!cl) {
cl                670 net/sched/sch_cbq.c 		if (cl->level > q->toplevel)
cl                672 net/sched/sch_cbq.c 	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
cl                674 net/sched/sch_cbq.c 	cl->delayed = 0;
cl                675 net/sched/sch_cbq.c 	return cl;
cl                682 net/sched/sch_cbq.c 	struct cbq_class *cl_tail, *cl_prev, *cl;
cl                687 net/sched/sch_cbq.c 	cl = cl_prev->next_alive;
cl                694 net/sched/sch_cbq.c 			struct cbq_class *borrow = cl;
cl                696 net/sched/sch_cbq.c 			if (cl->q->q.qlen &&
cl                697 net/sched/sch_cbq.c 			    (borrow = cbq_under_limit(cl)) == NULL)
cl                700 net/sched/sch_cbq.c 			if (cl->deficit <= 0) {
cl                705 net/sched/sch_cbq.c 				cl->deficit += cl->quantum;
cl                709 net/sched/sch_cbq.c 			skb = cl->q->dequeue(cl->q);
cl                718 net/sched/sch_cbq.c 			cl->deficit -= qdisc_pkt_len(skb);
cl                719 net/sched/sch_cbq.c 			q->tx_class = cl;
cl                721 net/sched/sch_cbq.c 			if (borrow != cl) {
cl                724 net/sched/sch_cbq.c 				cl->xstats.borrows++;
cl                727 net/sched/sch_cbq.c 				cl->xstats.borrows += qdisc_pkt_len(skb);
cl                732 net/sched/sch_cbq.c 			if (cl->deficit <= 0) {
cl                733 net/sched/sch_cbq.c 				q->active[prio] = cl;
cl                734 net/sched/sch_cbq.c 				cl = cl->next_alive;
cl                735 net/sched/sch_cbq.c 				cl->deficit += cl->quantum;
cl                740 net/sched/sch_cbq.c 			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
cl                744 net/sched/sch_cbq.c 				cl_prev->next_alive = cl->next_alive;
cl                745 net/sched/sch_cbq.c 				cl->next_alive = NULL;
cl                748 net/sched/sch_cbq.c 				if (cl == cl_tail) {
cl                753 net/sched/sch_cbq.c 					if (cl == cl_tail) {
cl                757 net/sched/sch_cbq.c 						if (cl->q->q.qlen)
cl                758 net/sched/sch_cbq.c 							cbq_activate_class(cl);
cl                764 net/sched/sch_cbq.c 				if (cl->q->q.qlen)
cl                765 net/sched/sch_cbq.c 					cbq_activate_class(cl);
cl                767 net/sched/sch_cbq.c 				cl = cl_prev;
cl                771 net/sched/sch_cbq.c 			cl_prev = cl;
cl                772 net/sched/sch_cbq.c 			cl = cl->next_alive;
cl                871 net/sched/sch_cbq.c 		struct cbq_class *cl;
cl                873 net/sched/sch_cbq.c 		cl = this->children;
cl                874 net/sched/sch_cbq.c 		if (cl) {
cl                876 net/sched/sch_cbq.c 				if (cl->level > level)
cl                877 net/sched/sch_cbq.c 					level = cl->level;
cl                878 net/sched/sch_cbq.c 			} while ((cl = cl->sibling) != this->children);
cl                886 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl                893 net/sched/sch_cbq.c 		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
cl                897 net/sched/sch_cbq.c 			if (cl->priority == prio) {
cl                898 net/sched/sch_cbq.c 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
cl                901 net/sched/sch_cbq.c 			if (cl->quantum <= 0 ||
cl                902 net/sched/sch_cbq.c 			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
cl                904 net/sched/sch_cbq.c 					cl->common.classid, cl->quantum);
cl                905 net/sched/sch_cbq.c 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
cl                911 net/sched/sch_cbq.c static void cbq_sync_defmap(struct cbq_class *cl)
cl                913 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl                914 net/sched/sch_cbq.c 	struct cbq_class *split = cl->split;
cl                922 net/sched/sch_cbq.c 		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
cl                947 net/sched/sch_cbq.c static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
cl                952 net/sched/sch_cbq.c 		split = cl->split;
cl                959 net/sched/sch_cbq.c 		for (split = cl->tparent; split; split = split->tparent)
cl                967 net/sched/sch_cbq.c 	if (cl->split != split) {
cl                968 net/sched/sch_cbq.c 		cl->defmap = 0;
cl                969 net/sched/sch_cbq.c 		cbq_sync_defmap(cl);
cl                970 net/sched/sch_cbq.c 		cl->split = split;
cl                971 net/sched/sch_cbq.c 		cl->defmap = def & mask;
cl                973 net/sched/sch_cbq.c 		cl->defmap = (cl->defmap & ~mask) | (def & mask);
cl                975 net/sched/sch_cbq.c 	cbq_sync_defmap(cl);
cl                980 net/sched/sch_cbq.c 	struct cbq_class *cl, **clp;
cl                987 net/sched/sch_cbq.c 		cl = *clp;
cl                989 net/sched/sch_cbq.c 			if (cl == this) {
cl                990 net/sched/sch_cbq.c 				*clp = cl->sibling;
cl                993 net/sched/sch_cbq.c 			clp = &cl->sibling;
cl                994 net/sched/sch_cbq.c 		} while ((cl = *clp) != this->sibling);
cl               1029 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl               1046 net/sched/sch_cbq.c 		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
cl               1047 net/sched/sch_cbq.c 			qdisc_reset(cl->q);
cl               1049 net/sched/sch_cbq.c 			cl->next_alive = NULL;
cl               1050 net/sched/sch_cbq.c 			cl->undertime = PSCHED_PASTPERFECT;
cl               1051 net/sched/sch_cbq.c 			cl->avgidle = cl->maxidle;
cl               1052 net/sched/sch_cbq.c 			cl->deficit = cl->quantum;
cl               1053 net/sched/sch_cbq.c 			cl->cpriority = cl->priority;
cl               1060 net/sched/sch_cbq.c static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
cl               1063 net/sched/sch_cbq.c 		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
cl               1064 net/sched/sch_cbq.c 		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
cl               1067 net/sched/sch_cbq.c 		cl->ewma_log = lss->ewma_log;
cl               1069 net/sched/sch_cbq.c 		cl->avpkt = lss->avpkt;
cl               1071 net/sched/sch_cbq.c 		cl->minidle = -(long)lss->minidle;
cl               1073 net/sched/sch_cbq.c 		cl->maxidle = lss->maxidle;
cl               1074 net/sched/sch_cbq.c 		cl->avgidle = lss->maxidle;
cl               1077 net/sched/sch_cbq.c 		cl->offtime = lss->offtime;
cl               1081 net/sched/sch_cbq.c static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
cl               1083 net/sched/sch_cbq.c 	q->nclasses[cl->priority]--;
cl               1084 net/sched/sch_cbq.c 	q->quanta[cl->priority] -= cl->weight;
cl               1085 net/sched/sch_cbq.c 	cbq_normalize_quanta(q, cl->priority);
cl               1088 net/sched/sch_cbq.c static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
cl               1090 net/sched/sch_cbq.c 	q->nclasses[cl->priority]++;
cl               1091 net/sched/sch_cbq.c 	q->quanta[cl->priority] += cl->weight;
cl               1092 net/sched/sch_cbq.c 	cbq_normalize_quanta(q, cl->priority);
cl               1095 net/sched/sch_cbq.c static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
cl               1097 net/sched/sch_cbq.c 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl               1100 net/sched/sch_cbq.c 		cl->allot = wrr->allot;
cl               1102 net/sched/sch_cbq.c 		cl->weight = wrr->weight;
cl               1104 net/sched/sch_cbq.c 		cl->priority = wrr->priority - 1;
cl               1105 net/sched/sch_cbq.c 		cl->cpriority = cl->priority;
cl               1106 net/sched/sch_cbq.c 		if (cl->priority >= cl->priority2)
cl               1107 net/sched/sch_cbq.c 			cl->priority2 = TC_CBQ_MAXPRIO - 1;
cl               1110 net/sched/sch_cbq.c 	cbq_addprio(q, cl);
cl               1114 net/sched/sch_cbq.c static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
cl               1116 net/sched/sch_cbq.c 	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
cl               1232 net/sched/sch_cbq.c static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
cl               1236 net/sched/sch_cbq.c 	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
cl               1245 net/sched/sch_cbq.c static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
cl               1251 net/sched/sch_cbq.c 	if (cl->borrow == NULL)
cl               1253 net/sched/sch_cbq.c 	if (cl->share == NULL)
cl               1255 net/sched/sch_cbq.c 	opt.ewma_log = cl->ewma_log;
cl               1256 net/sched/sch_cbq.c 	opt.level = cl->level;
cl               1257 net/sched/sch_cbq.c 	opt.avpkt = cl->avpkt;
cl               1258 net/sched/sch_cbq.c 	opt.maxidle = cl->maxidle;
cl               1259 net/sched/sch_cbq.c 	opt.minidle = (u32)(-cl->minidle);
cl               1260 net/sched/sch_cbq.c 	opt.offtime = cl->offtime;
cl               1271 net/sched/sch_cbq.c static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
cl               1278 net/sched/sch_cbq.c 	opt.allot = cl->allot;
cl               1279 net/sched/sch_cbq.c 	opt.priority = cl->priority + 1;
cl               1280 net/sched/sch_cbq.c 	opt.cpriority = cl->cpriority + 1;
cl               1281 net/sched/sch_cbq.c 	opt.weight = cl->weight;
cl               1291 net/sched/sch_cbq.c static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
cl               1296 net/sched/sch_cbq.c 	if (cl->split || cl->defmap) {
cl               1297 net/sched/sch_cbq.c 		opt.split = cl->split ? cl->split->common.classid : 0;
cl               1298 net/sched/sch_cbq.c 		opt.defmap = cl->defmap;
cl               1310 net/sched/sch_cbq.c static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
cl               1312 net/sched/sch_cbq.c 	if (cbq_dump_lss(skb, cl) < 0 ||
cl               1313 net/sched/sch_cbq.c 	    cbq_dump_rate(skb, cl) < 0 ||
cl               1314 net/sched/sch_cbq.c 	    cbq_dump_wrr(skb, cl) < 0 ||
cl               1315 net/sched/sch_cbq.c 	    cbq_dump_fopt(skb, cl) < 0)
cl               1350 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1353 net/sched/sch_cbq.c 	if (cl->tparent)
cl               1354 net/sched/sch_cbq.c 		tcm->tcm_parent = cl->tparent->common.classid;
cl               1357 net/sched/sch_cbq.c 	tcm->tcm_handle = cl->common.classid;
cl               1358 net/sched/sch_cbq.c 	tcm->tcm_info = cl->q->handle;
cl               1363 net/sched/sch_cbq.c 	if (cbq_dump_attr(skb, cl) < 0)
cl               1377 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1380 net/sched/sch_cbq.c 	cl->xstats.avgidle = cl->avgidle;
cl               1381 net/sched/sch_cbq.c 	cl->xstats.undertime = 0;
cl               1382 net/sched/sch_cbq.c 	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
cl               1384 net/sched/sch_cbq.c 	if (cl->undertime != PSCHED_PASTPERFECT)
cl               1385 net/sched/sch_cbq.c 		cl->xstats.undertime = cl->undertime - q->now;
cl               1388 net/sched/sch_cbq.c 				  d, NULL, &cl->bstats) < 0 ||
cl               1389 net/sched/sch_cbq.c 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
cl               1390 net/sched/sch_cbq.c 	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
cl               1393 net/sched/sch_cbq.c 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
cl               1399 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1403 net/sched/sch_cbq.c 					cl->common.classid, extack);
cl               1408 net/sched/sch_cbq.c 	*old = qdisc_replace(sch, new, &cl->q);
cl               1414 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1416 net/sched/sch_cbq.c 	return cl->q;
cl               1421 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1423 net/sched/sch_cbq.c 	cbq_deactivate_class(cl);
cl               1433 net/sched/sch_cbq.c static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
cl               1437 net/sched/sch_cbq.c 	WARN_ON(cl->filters);
cl               1439 net/sched/sch_cbq.c 	tcf_block_put(cl->block);
cl               1440 net/sched/sch_cbq.c 	qdisc_put(cl->q);
cl               1441 net/sched/sch_cbq.c 	qdisc_put_rtab(cl->R_tab);
cl               1442 net/sched/sch_cbq.c 	gen_kill_estimator(&cl->rate_est);
cl               1443 net/sched/sch_cbq.c 	if (cl != &q->link)
cl               1444 net/sched/sch_cbq.c 		kfree(cl);
cl               1451 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl               1463 net/sched/sch_cbq.c 		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
cl               1464 net/sched/sch_cbq.c 			tcf_block_put(cl->block);
cl               1465 net/sched/sch_cbq.c 			cl->block = NULL;
cl               1469 net/sched/sch_cbq.c 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
cl               1471 net/sched/sch_cbq.c 			cbq_destroy_class(sch, cl);
cl               1482 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)*arg;
cl               1497 net/sched/sch_cbq.c 	if (cl) {
cl               1500 net/sched/sch_cbq.c 			if (cl->tparent &&
cl               1501 net/sched/sch_cbq.c 			    cl->tparent->common.classid != parentid) {
cl               1505 net/sched/sch_cbq.c 			if (!cl->tparent && parentid != TC_H_ROOT) {
cl               1519 net/sched/sch_cbq.c 			err = gen_replace_estimator(&cl->bstats, NULL,
cl               1520 net/sched/sch_cbq.c 						    &cl->rate_est,
cl               1534 net/sched/sch_cbq.c 		if (cl->next_alive != NULL)
cl               1535 net/sched/sch_cbq.c 			cbq_deactivate_class(cl);
cl               1538 net/sched/sch_cbq.c 			qdisc_put_rtab(cl->R_tab);
cl               1539 net/sched/sch_cbq.c 			cl->R_tab = rtab;
cl               1543 net/sched/sch_cbq.c 			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cl               1546 net/sched/sch_cbq.c 			cbq_rmprio(q, cl);
cl               1547 net/sched/sch_cbq.c 			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
cl               1551 net/sched/sch_cbq.c 			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
cl               1553 net/sched/sch_cbq.c 		if (cl->q->q.qlen)
cl               1554 net/sched/sch_cbq.c 			cbq_activate_class(cl);
cl               1610 net/sched/sch_cbq.c 	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
cl               1611 net/sched/sch_cbq.c 	if (cl == NULL)
cl               1614 net/sched/sch_cbq.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
cl               1616 net/sched/sch_cbq.c 		kfree(cl);
cl               1621 net/sched/sch_cbq.c 		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
cl               1627 net/sched/sch_cbq.c 			tcf_block_put(cl->block);
cl               1628 net/sched/sch_cbq.c 			kfree(cl);
cl               1633 net/sched/sch_cbq.c 	cl->R_tab = rtab;
cl               1635 net/sched/sch_cbq.c 	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
cl               1637 net/sched/sch_cbq.c 	if (!cl->q)
cl               1638 net/sched/sch_cbq.c 		cl->q = &noop_qdisc;
cl               1640 net/sched/sch_cbq.c 		qdisc_hash_add(cl->q, true);
cl               1642 net/sched/sch_cbq.c 	cl->common.classid = classid;
cl               1643 net/sched/sch_cbq.c 	cl->tparent = parent;
cl               1644 net/sched/sch_cbq.c 	cl->qdisc = sch;
cl               1645 net/sched/sch_cbq.c 	cl->allot = parent->allot;
cl               1646 net/sched/sch_cbq.c 	cl->quantum = cl->allot;
cl               1647 net/sched/sch_cbq.c 	cl->weight = cl->R_tab->rate.rate;
cl               1650 net/sched/sch_cbq.c 	cbq_link_class(cl);
cl               1651 net/sched/sch_cbq.c 	cl->borrow = cl->tparent;
cl               1652 net/sched/sch_cbq.c 	if (cl->tparent != &q->link)
cl               1653 net/sched/sch_cbq.c 		cl->share = cl->tparent;
cl               1655 net/sched/sch_cbq.c 	cl->minidle = -0x7FFFFFFF;
cl               1656 net/sched/sch_cbq.c 	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cl               1657 net/sched/sch_cbq.c 	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
cl               1658 net/sched/sch_cbq.c 	if (cl->ewma_log == 0)
cl               1659 net/sched/sch_cbq.c 		cl->ewma_log = q->link.ewma_log;
cl               1660 net/sched/sch_cbq.c 	if (cl->maxidle == 0)
cl               1661 net/sched/sch_cbq.c 		cl->maxidle = q->link.maxidle;
cl               1662 net/sched/sch_cbq.c 	if (cl->avpkt == 0)
cl               1663 net/sched/sch_cbq.c 		cl->avpkt = q->link.avpkt;
cl               1665 net/sched/sch_cbq.c 		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
cl               1670 net/sched/sch_cbq.c 	*arg = (unsigned long)cl;
cl               1681 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1683 net/sched/sch_cbq.c 	if (cl->filters || cl->children || cl == &q->link)
cl               1688 net/sched/sch_cbq.c 	qdisc_purge_queue(cl->q);
cl               1690 net/sched/sch_cbq.c 	if (cl->next_alive)
cl               1691 net/sched/sch_cbq.c 		cbq_deactivate_class(cl);
cl               1693 net/sched/sch_cbq.c 	if (q->tx_borrowed == cl)
cl               1695 net/sched/sch_cbq.c 	if (q->tx_class == cl) {
cl               1700 net/sched/sch_cbq.c 	if (q->rx_class == cl)
cl               1704 net/sched/sch_cbq.c 	cbq_unlink_class(cl);
cl               1705 net/sched/sch_cbq.c 	cbq_adjust_levels(cl->tparent);
cl               1706 net/sched/sch_cbq.c 	cl->defmap = 0;
cl               1707 net/sched/sch_cbq.c 	cbq_sync_defmap(cl);
cl               1709 net/sched/sch_cbq.c 	cbq_rmprio(q, cl);
cl               1712 net/sched/sch_cbq.c 	cbq_destroy_class(sch, cl);
cl               1720 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1722 net/sched/sch_cbq.c 	if (cl == NULL)
cl               1723 net/sched/sch_cbq.c 		cl = &q->link;
cl               1725 net/sched/sch_cbq.c 	return cl->block;
cl               1733 net/sched/sch_cbq.c 	struct cbq_class *cl = cbq_class_lookup(q, classid);
cl               1735 net/sched/sch_cbq.c 	if (cl) {
cl               1736 net/sched/sch_cbq.c 		if (p && p->level <= cl->level)
cl               1738 net/sched/sch_cbq.c 		cl->filters++;
cl               1739 net/sched/sch_cbq.c 		return (unsigned long)cl;
cl               1746 net/sched/sch_cbq.c 	struct cbq_class *cl = (struct cbq_class *)arg;
cl               1748 net/sched/sch_cbq.c 	cl->filters--;
cl               1754 net/sched/sch_cbq.c 	struct cbq_class *cl;
cl               1761 net/sched/sch_cbq.c 		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
cl               1766 net/sched/sch_cbq.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
cl                477 net/sched/sch_cbs.c static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
cl                482 net/sched/sch_cbs.c 	if (cl != 1 || !q->qdisc)	/* only one class */
cl                 59 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)*arg;
cl                 84 net/sched/sch_drr.c 	if (cl != NULL) {
cl                 86 net/sched/sch_drr.c 			err = gen_replace_estimator(&cl->bstats, NULL,
cl                 87 net/sched/sch_drr.c 						    &cl->rate_est,
cl                 99 net/sched/sch_drr.c 			cl->quantum = quantum;
cl                105 net/sched/sch_drr.c 	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
cl                106 net/sched/sch_drr.c 	if (cl == NULL)
cl                109 net/sched/sch_drr.c 	cl->common.classid = classid;
cl                110 net/sched/sch_drr.c 	cl->quantum	   = quantum;
cl                111 net/sched/sch_drr.c 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
cl                114 net/sched/sch_drr.c 	if (cl->qdisc == NULL)
cl                115 net/sched/sch_drr.c 		cl->qdisc = &noop_qdisc;
cl                117 net/sched/sch_drr.c 		qdisc_hash_add(cl->qdisc, true);
cl                120 net/sched/sch_drr.c 		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
cl                126 net/sched/sch_drr.c 			qdisc_put(cl->qdisc);
cl                127 net/sched/sch_drr.c 			kfree(cl);
cl                133 net/sched/sch_drr.c 	qdisc_class_hash_insert(&q->clhash, &cl->common);
cl                138 net/sched/sch_drr.c 	*arg = (unsigned long)cl;
cl                142 net/sched/sch_drr.c static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
cl                144 net/sched/sch_drr.c 	gen_kill_estimator(&cl->rate_est);
cl                145 net/sched/sch_drr.c 	qdisc_put(cl->qdisc);
cl                146 net/sched/sch_drr.c 	kfree(cl);
cl                152 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                154 net/sched/sch_drr.c 	if (cl->filter_cnt > 0)
cl                159 net/sched/sch_drr.c 	qdisc_purge_queue(cl->qdisc);
cl                160 net/sched/sch_drr.c 	qdisc_class_hash_remove(&q->clhash, &cl->common);
cl                164 net/sched/sch_drr.c 	drr_destroy_class(sch, cl);
cl                173 net/sched/sch_drr.c static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                178 net/sched/sch_drr.c 	if (cl) {
cl                189 net/sched/sch_drr.c 	struct drr_class *cl = drr_find_class(sch, classid);
cl                191 net/sched/sch_drr.c 	if (cl != NULL)
cl                192 net/sched/sch_drr.c 		cl->filter_cnt++;
cl                194 net/sched/sch_drr.c 	return (unsigned long)cl;
cl                199 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                201 net/sched/sch_drr.c 	cl->filter_cnt--;
cl                208 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                212 net/sched/sch_drr.c 					cl->common.classid, NULL);
cl                217 net/sched/sch_drr.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
cl                223 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                225 net/sched/sch_drr.c 	return cl->qdisc;
cl                230 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                232 net/sched/sch_drr.c 	list_del(&cl->alist);
cl                238 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                242 net/sched/sch_drr.c 	tcm->tcm_handle	= cl->common.classid;
cl                243 net/sched/sch_drr.c 	tcm->tcm_info	= cl->qdisc->handle;
cl                248 net/sched/sch_drr.c 	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
cl                260 net/sched/sch_drr.c 	struct drr_class *cl = (struct drr_class *)arg;
cl                261 net/sched/sch_drr.c 	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
cl                262 net/sched/sch_drr.c 	struct Qdisc *cl_q = cl->qdisc;
cl                267 net/sched/sch_drr.c 		xstats.deficit = cl->deficit;
cl                270 net/sched/sch_drr.c 				  d, NULL, &cl->bstats) < 0 ||
cl                271 net/sched/sch_drr.c 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
cl                281 net/sched/sch_drr.c 	struct drr_class *cl;
cl                288 net/sched/sch_drr.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl                293 net/sched/sch_drr.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
cl                306 net/sched/sch_drr.c 	struct drr_class *cl;
cl                312 net/sched/sch_drr.c 		cl = drr_find_class(sch, skb->priority);
cl                313 net/sched/sch_drr.c 		if (cl != NULL)
cl                314 net/sched/sch_drr.c 			return cl;
cl                332 net/sched/sch_drr.c 		cl = (struct drr_class *)res.class;
cl                333 net/sched/sch_drr.c 		if (cl == NULL)
cl                334 net/sched/sch_drr.c 			cl = drr_find_class(sch, res.classid);
cl                335 net/sched/sch_drr.c 		return cl;
cl                345 net/sched/sch_drr.c 	struct drr_class *cl;
cl                349 net/sched/sch_drr.c 	cl = drr_classify(skb, sch, &err);
cl                350 net/sched/sch_drr.c 	if (cl == NULL) {
cl                357 net/sched/sch_drr.c 	first = !cl->qdisc->q.qlen;
cl                358 net/sched/sch_drr.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
cl                361 net/sched/sch_drr.c 			cl->qstats.drops++;
cl                368 net/sched/sch_drr.c 		list_add_tail(&cl->alist, &q->active);
cl                369 net/sched/sch_drr.c 		cl->deficit = cl->quantum;
cl                380 net/sched/sch_drr.c 	struct drr_class *cl;
cl                387 net/sched/sch_drr.c 		cl = list_first_entry(&q->active, struct drr_class, alist);
cl                388 net/sched/sch_drr.c 		skb = cl->qdisc->ops->peek(cl->qdisc);
cl                390 net/sched/sch_drr.c 			qdisc_warn_nonwc(__func__, cl->qdisc);
cl                395 net/sched/sch_drr.c 		if (len <= cl->deficit) {
cl                396 net/sched/sch_drr.c 			cl->deficit -= len;
cl                397 net/sched/sch_drr.c 			skb = qdisc_dequeue_peeked(cl->qdisc);
cl                400 net/sched/sch_drr.c 			if (cl->qdisc->q.qlen == 0)
cl                401 net/sched/sch_drr.c 				list_del(&cl->alist);
cl                403 net/sched/sch_drr.c 			bstats_update(&cl->bstats, skb);
cl                410 net/sched/sch_drr.c 		cl->deficit += cl->quantum;
cl                411 net/sched/sch_drr.c 		list_move_tail(&cl->alist, &q->active);
cl                436 net/sched/sch_drr.c 	struct drr_class *cl;
cl                440 net/sched/sch_drr.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl                441 net/sched/sch_drr.c 			if (cl->qdisc->q.qlen)
cl                442 net/sched/sch_drr.c 				list_del(&cl->alist);
cl                443 net/sched/sch_drr.c 			qdisc_reset(cl->qdisc);
cl                453 net/sched/sch_drr.c 	struct drr_class *cl;
cl                460 net/sched/sch_drr.c 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
cl                462 net/sched/sch_drr.c 			drr_destroy_class(sch, cl);
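
The sch_drr.c dequeue lines above are a textbook deficit round robin: the class at the head of the active list may transmit while its deficit covers the head-of-line packet; once it cannot, it gains one quantum and rotates to the tail, and a class that empties leaves the list. The self-contained sketch below reproduces that loop; an array stands in for the kernel's linked list, and the packet lengths are made-up demo data.

#include <stdio.h>

struct drr_class {
	const char *name;
	int quantum;
	int deficit;
	const int *pkts;	/* head-of-line lengths, 0-terminated */
	int head;
};

int main(void)
{
	static const int a_pkts[] = { 1000, 1000, 0 };
	static const int b_pkts[] = { 400, 400, 400, 0 };
	struct drr_class a = { "A", 1000, 1000, a_pkts, 0 };
	struct drr_class b = { "B", 1000, 1000, b_pkts, 0 };
	struct drr_class *active[2] = { &a, &b };
	int n = 2;

	while (n) {
		struct drr_class *cl = active[0];
		int len = cl->pkts[cl->head];

		if (len <= cl->deficit) {
			/* enough deficit: send and keep serving this class */
			cl->deficit -= len;
			cl->head++;
			printf("sent %s len=%d (deficit now %d)\n",
			       cl->name, len, cl->deficit);
			if (!cl->pkts[cl->head]) {	/* class emptied: drop it */
				for (int j = 0; j < n - 1; j++)
					active[j] = active[j + 1];
				n--;
			}
		} else {
			/* out of deficit: top up by one quantum, rotate to tail */
			cl->deficit += cl->quantum;
			for (int j = 0; j < n - 1; j++)
				active[j] = active[j + 1];
			active[n - 1] = cl;
		}
	}
	return 0;
}
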
cl                104 net/sched/sch_dsmark.c static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
cl                191 net/sched/sch_dsmark.c static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                426 net/sched/sch_dsmark.c static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
cl                432 net/sched/sch_dsmark.c 	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
cl                434 net/sched/sch_dsmark.c 	if (!dsmark_valid_index(p, cl))
cl                437 net/sched/sch_dsmark.c 	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
cl                443 net/sched/sch_dsmark.c 	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
cl                444 net/sched/sch_dsmark.c 	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
cl                594 net/sched/sch_fq_codel.c static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
cl                598 net/sched/sch_fq_codel.c static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                603 net/sched/sch_fq_codel.c 	if (cl)
cl                608 net/sched/sch_fq_codel.c static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
cl                611 net/sched/sch_fq_codel.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                615 net/sched/sch_fq_codel.c static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                619 net/sched/sch_fq_codel.c 	u32 idx = cl - 1;
cl                186 net/sched/sch_hfsc.c eltree_insert(struct hfsc_class *cl)
cl                188 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->sched->eligible.rb_node;
cl                195 net/sched/sch_hfsc.c 		if (cl->cl_e >= cl1->cl_e)
cl                200 net/sched/sch_hfsc.c 	rb_link_node(&cl->el_node, parent, p);
cl                201 net/sched/sch_hfsc.c 	rb_insert_color(&cl->el_node, &cl->sched->eligible);
cl                205 net/sched/sch_hfsc.c eltree_remove(struct hfsc_class *cl)
cl                207 net/sched/sch_hfsc.c 	rb_erase(&cl->el_node, &cl->sched->eligible);
cl                211 net/sched/sch_hfsc.c eltree_update(struct hfsc_class *cl)
cl                213 net/sched/sch_hfsc.c 	eltree_remove(cl);
cl                214 net/sched/sch_hfsc.c 	eltree_insert(cl);
cl                221 net/sched/sch_hfsc.c 	struct hfsc_class *p, *cl = NULL;
cl                228 net/sched/sch_hfsc.c 		if (cl == NULL || p->cl_d < cl->cl_d)
cl                229 net/sched/sch_hfsc.c 			cl = p;
cl                231 net/sched/sch_hfsc.c 	return cl;
cl                251 net/sched/sch_hfsc.c vttree_insert(struct hfsc_class *cl)
cl                253 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
cl                260 net/sched/sch_hfsc.c 		if (cl->cl_vt >= cl1->cl_vt)
cl                265 net/sched/sch_hfsc.c 	rb_link_node(&cl->vt_node, parent, p);
cl                266 net/sched/sch_hfsc.c 	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
cl                270 net/sched/sch_hfsc.c vttree_remove(struct hfsc_class *cl)
cl                272 net/sched/sch_hfsc.c 	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
cl                276 net/sched/sch_hfsc.c vttree_update(struct hfsc_class *cl)
cl                278 net/sched/sch_hfsc.c 	vttree_remove(cl);
cl                279 net/sched/sch_hfsc.c 	vttree_insert(cl);
cl                283 net/sched/sch_hfsc.c vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
cl                288 net/sched/sch_hfsc.c 	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
cl                300 net/sched/sch_hfsc.c vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
cl                303 net/sched/sch_hfsc.c 	if (cl->cl_cfmin > cur_time)
cl                306 net/sched/sch_hfsc.c 	while (cl->level > 0) {
cl                307 net/sched/sch_hfsc.c 		cl = vttree_firstfit(cl, cur_time);
cl                308 net/sched/sch_hfsc.c 		if (cl == NULL)
cl                313 net/sched/sch_hfsc.c 		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
cl                314 net/sched/sch_hfsc.c 			cl->cl_parent->cl_cvtmin = cl->cl_vt;
cl                316 net/sched/sch_hfsc.c 	return cl;
cl                320 net/sched/sch_hfsc.c cftree_insert(struct hfsc_class *cl)
cl                322 net/sched/sch_hfsc.c 	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
cl                329 net/sched/sch_hfsc.c 		if (cl->cl_f >= cl1->cl_f)
cl                334 net/sched/sch_hfsc.c 	rb_link_node(&cl->cf_node, parent, p);
cl                335 net/sched/sch_hfsc.c 	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
cl                339 net/sched/sch_hfsc.c cftree_remove(struct hfsc_class *cl)
cl                341 net/sched/sch_hfsc.c 	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
cl                345 net/sched/sch_hfsc.c cftree_update(struct hfsc_class *cl)
cl                347 net/sched/sch_hfsc.c 	cftree_remove(cl);
cl                348 net/sched/sch_hfsc.c 	cftree_insert(cl);
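
eltree_insert(), vttree_insert() and cftree_insert() above all share one walk: descend right when the new key (cl_e, cl_vt or cl_f respectively) is >= the current node's key, left otherwise, then link at the reached leaf. The sketch below reproduces that walk over a plain unbalanced binary search tree; the kernel's rb_insert_color() rebalancing step has no counterpart here, so this shows the ordering logic only.

#include <stdio.h>
#include <stdint.h>

struct node {
	uint64_t key;	/* stands in for cl_e / cl_vt / cl_f */
	struct node *left, *right;
};

static void keyed_insert(struct node **root, struct node *n)
{
	struct node **p = root;

	while (*p) {
		if (n->key >= (*p)->key)	/* ties go right, as in the kernel */
			p = &(*p)->right;
		else
			p = &(*p)->left;
	}
	*p = n;	/* link the new node at the leaf we reached */
}

static void inorder(const struct node *n)
{
	if (!n)
		return;
	inorder(n->left);
	printf("%llu ", (unsigned long long)n->key);
	inorder(n->right);
}

int main(void)
{
	struct node nodes[] = { { 30 }, { 10 }, { 20 }, { 10 } };
	struct node *root = NULL;

	for (unsigned int i = 0; i < 4; i++)
		keyed_insert(&root, &nodes[i]);
	inorder(root);	/* prints "10 10 20 30": sorted by key */
	printf("\n");
	return 0;
}
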
cl                610 net/sched/sch_hfsc.c init_ed(struct hfsc_class *cl, unsigned int next_len)
cl                615 net/sched/sch_hfsc.c 	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
cl                622 net/sched/sch_hfsc.c 	cl->cl_eligible = cl->cl_deadline;
cl                623 net/sched/sch_hfsc.c 	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
cl                624 net/sched/sch_hfsc.c 		cl->cl_eligible.dx = 0;
cl                625 net/sched/sch_hfsc.c 		cl->cl_eligible.dy = 0;
cl                629 net/sched/sch_hfsc.c 	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
cl                630 net/sched/sch_hfsc.c 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
cl                632 net/sched/sch_hfsc.c 	eltree_insert(cl);
cl                636 net/sched/sch_hfsc.c update_ed(struct hfsc_class *cl, unsigned int next_len)
cl                638 net/sched/sch_hfsc.c 	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
cl                639 net/sched/sch_hfsc.c 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
cl                641 net/sched/sch_hfsc.c 	eltree_update(cl);
cl                645 net/sched/sch_hfsc.c update_d(struct hfsc_class *cl, unsigned int next_len)
cl                647 net/sched/sch_hfsc.c 	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
cl                651 net/sched/sch_hfsc.c update_cfmin(struct hfsc_class *cl)
cl                653 net/sched/sch_hfsc.c 	struct rb_node *n = rb_first(&cl->cf_tree);
cl                657 net/sched/sch_hfsc.c 		cl->cl_cfmin = 0;
cl                661 net/sched/sch_hfsc.c 	cl->cl_cfmin = p->cl_f;
cl                665 net/sched/sch_hfsc.c init_vf(struct hfsc_class *cl, unsigned int len)
cl                674 net/sched/sch_hfsc.c 	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
cl                675 net/sched/sch_hfsc.c 		if (go_active && cl->cl_nactive++ == 0)
cl                681 net/sched/sch_hfsc.c 			n = rb_last(&cl->cl_parent->vt_tree);
cl                690 net/sched/sch_hfsc.c 				if (cl->cl_parent->cl_cvtmin != 0)
cl                691 net/sched/sch_hfsc.c 					vt = (cl->cl_parent->cl_cvtmin + vt)/2;
cl                693 net/sched/sch_hfsc.c 				if (cl->cl_parent->cl_vtperiod !=
cl                694 net/sched/sch_hfsc.c 				    cl->cl_parentperiod || vt > cl->cl_vt)
cl                695 net/sched/sch_hfsc.c 					cl->cl_vt = vt;
cl                703 net/sched/sch_hfsc.c 				cl->cl_vt = cl->cl_parent->cl_cvtoff;
cl                704 net/sched/sch_hfsc.c 				cl->cl_parent->cl_cvtmin = 0;
cl                708 net/sched/sch_hfsc.c 			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
cl                709 net/sched/sch_hfsc.c 			cl->cl_vtadj = 0;
cl                711 net/sched/sch_hfsc.c 			cl->cl_vtperiod++;  /* increment vt period */
cl                712 net/sched/sch_hfsc.c 			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
cl                713 net/sched/sch_hfsc.c 			if (cl->cl_parent->cl_nactive == 0)
cl                714 net/sched/sch_hfsc.c 				cl->cl_parentperiod++;
cl                715 net/sched/sch_hfsc.c 			cl->cl_f = 0;
cl                717 net/sched/sch_hfsc.c 			vttree_insert(cl);
cl                718 net/sched/sch_hfsc.c 			cftree_insert(cl);
cl                720 net/sched/sch_hfsc.c 			if (cl->cl_flags & HFSC_USC) {
cl                726 net/sched/sch_hfsc.c 				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
cl                727 net/sched/sch_hfsc.c 					 cl->cl_total);
cl                729 net/sched/sch_hfsc.c 				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
cl                730 net/sched/sch_hfsc.c 						      cl->cl_total);
cl                734 net/sched/sch_hfsc.c 		f = max(cl->cl_myf, cl->cl_cfmin);
cl                735 net/sched/sch_hfsc.c 		if (f != cl->cl_f) {
cl                736 net/sched/sch_hfsc.c 			cl->cl_f = f;
cl                737 net/sched/sch_hfsc.c 			cftree_update(cl);
cl                739 net/sched/sch_hfsc.c 		update_cfmin(cl->cl_parent);
cl                744 net/sched/sch_hfsc.c update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
cl                749 net/sched/sch_hfsc.c 	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
cl                752 net/sched/sch_hfsc.c 	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
cl                753 net/sched/sch_hfsc.c 		cl->cl_total += len;
cl                755 net/sched/sch_hfsc.c 		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
cl                758 net/sched/sch_hfsc.c 		if (go_passive && --cl->cl_nactive == 0)
cl                764 net/sched/sch_hfsc.c 		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;
cl                771 net/sched/sch_hfsc.c 		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
cl                772 net/sched/sch_hfsc.c 			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
cl                773 net/sched/sch_hfsc.c 			cl->cl_vt = cl->cl_parent->cl_cvtmin;
cl                780 net/sched/sch_hfsc.c 			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
cl                781 net/sched/sch_hfsc.c 				cl->cl_parent->cl_cvtoff = cl->cl_vt;
cl                784 net/sched/sch_hfsc.c 			vttree_remove(cl);
cl                786 net/sched/sch_hfsc.c 			cftree_remove(cl);
cl                787 net/sched/sch_hfsc.c 			update_cfmin(cl->cl_parent);
cl                793 net/sched/sch_hfsc.c 		vttree_update(cl);
cl                796 net/sched/sch_hfsc.c 		if (cl->cl_flags & HFSC_USC) {
cl                797 net/sched/sch_hfsc.c 			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
cl                799 net/sched/sch_hfsc.c 			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
cl                800 net/sched/sch_hfsc.c 							      cl->cl_total);
cl                814 net/sched/sch_hfsc.c 			if (cl->cl_myf < myf_bound) {
cl                815 net/sched/sch_hfsc.c 				delta = cur_time - cl->cl_myf;
cl                816 net/sched/sch_hfsc.c 				cl->cl_myfadj += delta;
cl                817 net/sched/sch_hfsc.c 				cl->cl_myf += delta;
cl                822 net/sched/sch_hfsc.c 		f = max(cl->cl_myf, cl->cl_cfmin);
cl                823 net/sched/sch_hfsc.c 		if (f != cl->cl_f) {
cl                824 net/sched/sch_hfsc.c 			cl->cl_f = f;
cl                825 net/sched/sch_hfsc.c 			cftree_update(cl);
cl                826 net/sched/sch_hfsc.c 			update_cfmin(cl->cl_parent);
cl                848 net/sched/sch_hfsc.c hfsc_adjust_levels(struct hfsc_class *cl)
cl                855 net/sched/sch_hfsc.c 		list_for_each_entry(p, &cl->children, siblings) {
cl                859 net/sched/sch_hfsc.c 		cl->level = level;
cl                860 net/sched/sch_hfsc.c 	} while ((cl = cl->cl_parent) != NULL);
cl                876 net/sched/sch_hfsc.c hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
cl                879 net/sched/sch_hfsc.c 	sc2isc(rsc, &cl->cl_rsc);
cl                880 net/sched/sch_hfsc.c 	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
cl                881 net/sched/sch_hfsc.c 	cl->cl_eligible = cl->cl_deadline;
cl                882 net/sched/sch_hfsc.c 	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
cl                883 net/sched/sch_hfsc.c 		cl->cl_eligible.dx = 0;
cl                884 net/sched/sch_hfsc.c 		cl->cl_eligible.dy = 0;
cl                886 net/sched/sch_hfsc.c 	cl->cl_flags |= HFSC_RSC;
cl                890 net/sched/sch_hfsc.c hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
cl                892 net/sched/sch_hfsc.c 	sc2isc(fsc, &cl->cl_fsc);
cl                893 net/sched/sch_hfsc.c 	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
cl                894 net/sched/sch_hfsc.c 	cl->cl_flags |= HFSC_FSC;
cl                898 net/sched/sch_hfsc.c hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
cl                901 net/sched/sch_hfsc.c 	sc2isc(usc, &cl->cl_usc);
cl                902 net/sched/sch_hfsc.c 	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
cl                903 net/sched/sch_hfsc.c 	cl->cl_flags |= HFSC_USC;
cl                918 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)*arg;
cl                952 net/sched/sch_hfsc.c 	if (cl != NULL) {
cl                956 net/sched/sch_hfsc.c 			if (cl->cl_parent &&
cl                957 net/sched/sch_hfsc.c 			    cl->cl_parent->cl_common.classid != parentid)
cl                959 net/sched/sch_hfsc.c 			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
cl                965 net/sched/sch_hfsc.c 			err = gen_replace_estimator(&cl->bstats, NULL,
cl                966 net/sched/sch_hfsc.c 						    &cl->rate_est,
cl                975 net/sched/sch_hfsc.c 		old_flags = cl->cl_flags;
cl                978 net/sched/sch_hfsc.c 			hfsc_change_rsc(cl, rsc, cur_time);
cl                980 net/sched/sch_hfsc.c 			hfsc_change_fsc(cl, fsc);
cl                982 net/sched/sch_hfsc.c 			hfsc_change_usc(cl, usc, cur_time);
cl                984 net/sched/sch_hfsc.c 		if (cl->qdisc->q.qlen != 0) {
cl                985 net/sched/sch_hfsc.c 			int len = qdisc_peek_len(cl->qdisc);
cl                987 net/sched/sch_hfsc.c 			if (cl->cl_flags & HFSC_RSC) {
cl                989 net/sched/sch_hfsc.c 					update_ed(cl, len);
cl                991 net/sched/sch_hfsc.c 					init_ed(cl, len);
cl                994 net/sched/sch_hfsc.c 			if (cl->cl_flags & HFSC_FSC) {
cl                996 net/sched/sch_hfsc.c 					update_vf(cl, 0, cur_time);
cl                998 net/sched/sch_hfsc.c 					init_vf(cl, len);
cl               1024 net/sched/sch_hfsc.c 	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
cl               1025 net/sched/sch_hfsc.c 	if (cl == NULL)
cl               1028 net/sched/sch_hfsc.c 	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
cl               1030 net/sched/sch_hfsc.c 		kfree(cl);
cl               1035 net/sched/sch_hfsc.c 		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
cl               1040 net/sched/sch_hfsc.c 			tcf_block_put(cl->block);
cl               1041 net/sched/sch_hfsc.c 			kfree(cl);
cl               1047 net/sched/sch_hfsc.c 		hfsc_change_rsc(cl, rsc, 0);
cl               1049 net/sched/sch_hfsc.c 		hfsc_change_fsc(cl, fsc);
cl               1051 net/sched/sch_hfsc.c 		hfsc_change_usc(cl, usc, 0);
cl               1053 net/sched/sch_hfsc.c 	cl->cl_common.classid = classid;
cl               1054 net/sched/sch_hfsc.c 	cl->sched     = q;
cl               1055 net/sched/sch_hfsc.c 	cl->cl_parent = parent;
cl               1056 net/sched/sch_hfsc.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
cl               1058 net/sched/sch_hfsc.c 	if (cl->qdisc == NULL)
cl               1059 net/sched/sch_hfsc.c 		cl->qdisc = &noop_qdisc;
cl               1061 net/sched/sch_hfsc.c 		qdisc_hash_add(cl->qdisc, true);
cl               1062 net/sched/sch_hfsc.c 	INIT_LIST_HEAD(&cl->children);
cl               1063 net/sched/sch_hfsc.c 	cl->vt_tree = RB_ROOT;
cl               1064 net/sched/sch_hfsc.c 	cl->cf_tree = RB_ROOT;
cl               1067 net/sched/sch_hfsc.c 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
cl               1068 net/sched/sch_hfsc.c 	list_add_tail(&cl->siblings, &parent->children);
cl               1076 net/sched/sch_hfsc.c 	*arg = (unsigned long)cl;
cl               1081 net/sched/sch_hfsc.c hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
cl               1085 net/sched/sch_hfsc.c 	tcf_block_put(cl->block);
cl               1086 net/sched/sch_hfsc.c 	qdisc_put(cl->qdisc);
cl               1087 net/sched/sch_hfsc.c 	gen_kill_estimator(&cl->rate_est);
cl               1088 net/sched/sch_hfsc.c 	if (cl != &q->root)
cl               1089 net/sched/sch_hfsc.c 		kfree(cl);
cl               1096 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1098 net/sched/sch_hfsc.c 	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
cl               1103 net/sched/sch_hfsc.c 	list_del(&cl->siblings);
cl               1104 net/sched/sch_hfsc.c 	hfsc_adjust_levels(cl->cl_parent);
cl               1106 net/sched/sch_hfsc.c 	qdisc_purge_queue(cl->qdisc);
cl               1107 net/sched/sch_hfsc.c 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
cl               1111 net/sched/sch_hfsc.c 	hfsc_destroy_class(sch, cl);
cl               1119 net/sched/sch_hfsc.c 	struct hfsc_class *head, *cl;
cl               1125 net/sched/sch_hfsc.c 	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
cl               1126 net/sched/sch_hfsc.c 		if (cl->level == 0)
cl               1127 net/sched/sch_hfsc.c 			return cl;
cl               1144 net/sched/sch_hfsc.c 		cl = (struct hfsc_class *)res.class;
cl               1145 net/sched/sch_hfsc.c 		if (!cl) {
cl               1146 net/sched/sch_hfsc.c 			cl = hfsc_find_class(res.classid, sch);
cl               1147 net/sched/sch_hfsc.c 			if (!cl)
cl               1149 net/sched/sch_hfsc.c 			if (cl->level >= head->level)
cl               1153 net/sched/sch_hfsc.c 		if (cl->level == 0)
cl               1154 net/sched/sch_hfsc.c 			return cl; /* hit leaf class */
cl               1157 net/sched/sch_hfsc.c 		tcf = rcu_dereference_bh(cl->filter_list);
cl               1158 net/sched/sch_hfsc.c 		head = cl;
cl               1162 net/sched/sch_hfsc.c 	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
cl               1163 net/sched/sch_hfsc.c 	if (cl == NULL || cl->level > 0)
cl               1166 net/sched/sch_hfsc.c 	return cl;
cl               1173 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1175 net/sched/sch_hfsc.c 	if (cl->level > 0)
cl               1179 net/sched/sch_hfsc.c 					cl->cl_common.classid, NULL);
cl               1184 net/sched/sch_hfsc.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
cl               1191 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1193 net/sched/sch_hfsc.c 	if (cl->level == 0)
cl               1194 net/sched/sch_hfsc.c 		return cl->qdisc;
cl               1202 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1207 net/sched/sch_hfsc.c 	update_vf(cl, 0, 0);
cl               1208 net/sched/sch_hfsc.c 	if (cl->cl_flags & HFSC_RSC)
cl               1209 net/sched/sch_hfsc.c 		eltree_remove(cl);
cl               1222 net/sched/sch_hfsc.c 	struct hfsc_class *cl = hfsc_find_class(classid, sch);
cl               1224 net/sched/sch_hfsc.c 	if (cl != NULL) {
cl               1225 net/sched/sch_hfsc.c 		if (p != NULL && p->level <= cl->level)
cl               1227 net/sched/sch_hfsc.c 		cl->filter_cnt++;
cl               1230 net/sched/sch_hfsc.c 	return (unsigned long)cl;
cl               1236 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1238 net/sched/sch_hfsc.c 	cl->filter_cnt--;
cl               1245 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1247 net/sched/sch_hfsc.c 	if (cl == NULL)
cl               1248 net/sched/sch_hfsc.c 		cl = &q->root;
cl               1250 net/sched/sch_hfsc.c 	return cl->block;
cl               1271 net/sched/sch_hfsc.c hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
cl               1273 net/sched/sch_hfsc.c 	if ((cl->cl_flags & HFSC_RSC) &&
cl               1274 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
cl               1277 net/sched/sch_hfsc.c 	if ((cl->cl_flags & HFSC_FSC) &&
cl               1278 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
cl               1281 net/sched/sch_hfsc.c 	if ((cl->cl_flags & HFSC_USC) &&
cl               1282 net/sched/sch_hfsc.c 	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
cl               1295 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1298 net/sched/sch_hfsc.c 	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
cl               1300 net/sched/sch_hfsc.c 	tcm->tcm_handle = cl->cl_common.classid;
cl               1301 net/sched/sch_hfsc.c 	if (cl->level == 0)
cl               1302 net/sched/sch_hfsc.c 		tcm->tcm_info = cl->qdisc->handle;
cl               1307 net/sched/sch_hfsc.c 	if (hfsc_dump_curves(skb, cl) < 0)
cl               1320 net/sched/sch_hfsc.c 	struct hfsc_class *cl = (struct hfsc_class *)arg;
cl               1324 net/sched/sch_hfsc.c 	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
cl               1325 net/sched/sch_hfsc.c 	xstats.level   = cl->level;
cl               1326 net/sched/sch_hfsc.c 	xstats.period  = cl->cl_vtperiod;
cl               1327 net/sched/sch_hfsc.c 	xstats.work    = cl->cl_total;
cl               1328 net/sched/sch_hfsc.c 	xstats.rtwork  = cl->cl_cumul;
cl               1330 net/sched/sch_hfsc.c 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
cl               1331 net/sched/sch_hfsc.c 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
cl               1332 net/sched/sch_hfsc.c 	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
cl               1344 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1351 net/sched/sch_hfsc.c 		hlist_for_each_entry(cl, &q->clhash.hash[i],
cl               1357 net/sched/sch_hfsc.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
cl               1370 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1373 net/sched/sch_hfsc.c 	cl = eltree_get_minel(q);
cl               1374 net/sched/sch_hfsc.c 	if (cl)
cl               1375 net/sched/sch_hfsc.c 		next_time = cl->cl_e;
cl               1445 net/sched/sch_hfsc.c hfsc_reset_class(struct hfsc_class *cl)
cl               1447 net/sched/sch_hfsc.c 	cl->cl_total        = 0;
cl               1448 net/sched/sch_hfsc.c 	cl->cl_cumul        = 0;
cl               1449 net/sched/sch_hfsc.c 	cl->cl_d            = 0;
cl               1450 net/sched/sch_hfsc.c 	cl->cl_e            = 0;
cl               1451 net/sched/sch_hfsc.c 	cl->cl_vt           = 0;
cl               1452 net/sched/sch_hfsc.c 	cl->cl_vtadj        = 0;
cl               1453 net/sched/sch_hfsc.c 	cl->cl_cvtmin       = 0;
cl               1454 net/sched/sch_hfsc.c 	cl->cl_cvtoff       = 0;
cl               1455 net/sched/sch_hfsc.c 	cl->cl_vtperiod     = 0;
cl               1456 net/sched/sch_hfsc.c 	cl->cl_parentperiod = 0;
cl               1457 net/sched/sch_hfsc.c 	cl->cl_f            = 0;
cl               1458 net/sched/sch_hfsc.c 	cl->cl_myf          = 0;
cl               1459 net/sched/sch_hfsc.c 	cl->cl_cfmin        = 0;
cl               1460 net/sched/sch_hfsc.c 	cl->cl_nactive      = 0;
cl               1462 net/sched/sch_hfsc.c 	cl->vt_tree = RB_ROOT;
cl               1463 net/sched/sch_hfsc.c 	cl->cf_tree = RB_ROOT;
cl               1464 net/sched/sch_hfsc.c 	qdisc_reset(cl->qdisc);
cl               1466 net/sched/sch_hfsc.c 	if (cl->cl_flags & HFSC_RSC)
cl               1467 net/sched/sch_hfsc.c 		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
cl               1468 net/sched/sch_hfsc.c 	if (cl->cl_flags & HFSC_FSC)
cl               1469 net/sched/sch_hfsc.c 		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
cl               1470 net/sched/sch_hfsc.c 	if (cl->cl_flags & HFSC_USC)
cl               1471 net/sched/sch_hfsc.c 		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
cl               1478 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1482 net/sched/sch_hfsc.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
cl               1483 net/sched/sch_hfsc.c 			hfsc_reset_class(cl);
cl               1496 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1500 net/sched/sch_hfsc.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
cl               1501 net/sched/sch_hfsc.c 			tcf_block_put(cl->block);
cl               1502 net/sched/sch_hfsc.c 			cl->block = NULL;
cl               1506 net/sched/sch_hfsc.c 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
cl               1508 net/sched/sch_hfsc.c 			hfsc_destroy_class(sch, cl);
cl               1535 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1539 net/sched/sch_hfsc.c 	cl = hfsc_classify(skb, sch, &err);
cl               1540 net/sched/sch_hfsc.c 	if (cl == NULL) {
cl               1547 net/sched/sch_hfsc.c 	first = !cl->qdisc->q.qlen;
cl               1548 net/sched/sch_hfsc.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
cl               1551 net/sched/sch_hfsc.c 			cl->qstats.drops++;
cl               1558 net/sched/sch_hfsc.c 		if (cl->cl_flags & HFSC_RSC)
cl               1559 net/sched/sch_hfsc.c 			init_ed(cl, len);
cl               1560 net/sched/sch_hfsc.c 		if (cl->cl_flags & HFSC_FSC)
cl               1561 net/sched/sch_hfsc.c 			init_vf(cl, len);
cl               1567 net/sched/sch_hfsc.c 		if (cl->cl_flags & HFSC_RSC)
cl               1568 net/sched/sch_hfsc.c 			cl->qdisc->ops->peek(cl->qdisc);
cl               1582 net/sched/sch_hfsc.c 	struct hfsc_class *cl;
cl               1598 net/sched/sch_hfsc.c 	cl = eltree_get_mindl(q, cur_time);
cl               1599 net/sched/sch_hfsc.c 	if (cl) {
cl               1606 net/sched/sch_hfsc.c 		cl = vttree_get_minvt(&q->root, cur_time);
cl               1607 net/sched/sch_hfsc.c 		if (cl == NULL) {
cl               1614 net/sched/sch_hfsc.c 	skb = qdisc_dequeue_peeked(cl->qdisc);
cl               1616 net/sched/sch_hfsc.c 		qdisc_warn_nonwc("HFSC", cl->qdisc);
cl               1620 net/sched/sch_hfsc.c 	bstats_update(&cl->bstats, skb);
cl               1621 net/sched/sch_hfsc.c 	update_vf(cl, qdisc_pkt_len(skb), cur_time);
cl               1623 net/sched/sch_hfsc.c 		cl->cl_cumul += qdisc_pkt_len(skb);
cl               1625 net/sched/sch_hfsc.c 	if (cl->cl_flags & HFSC_RSC) {
cl               1626 net/sched/sch_hfsc.c 		if (cl->qdisc->q.qlen != 0) {
cl               1628 net/sched/sch_hfsc.c 			next_len = qdisc_peek_len(cl->qdisc);
cl               1630 net/sched/sch_hfsc.c 				update_ed(cl, next_len);
cl               1632 net/sched/sch_hfsc.c 				update_d(cl, next_len);
cl               1635 net/sched/sch_hfsc.c 			eltree_remove(cl);
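
The hfsc_dequeue() lines above first try the real-time criterion via eltree_get_mindl(): among classes whose eligible time has arrived (cl_e <= cur_time), pick the one with the smallest deadline cl_d; only when no class is eligible does vttree_get_minvt() fall back to link-sharing by virtual time. A toy model of that selection follows, with a linear scan standing in for the eligible rb-tree walk and made-up class data.

#include <stdio.h>
#include <stdint.h>

struct rt_class {
	const char *name;
	uint64_t cl_e;	/* eligible time */
	uint64_t cl_d;	/* deadline */
};

/* Linear scan standing in for the rb-tree walk of eltree_get_mindl(). */
static struct rt_class *get_mindl(struct rt_class *cls, int n, uint64_t cur_time)
{
	struct rt_class *cl = NULL;

	for (int i = 0; i < n; i++) {
		if (cls[i].cl_e > cur_time)
			continue;	/* not yet eligible */
		if (cl == NULL || cls[i].cl_d < cl->cl_d)
			cl = &cls[i];
	}
	return cl;
}

int main(void)
{
	struct rt_class cls[] = {
		{ "voice", 100, 250 },
		{ "video", 100, 400 },
		{ "late",  900, 150 },	/* earliest deadline but not yet eligible */
	};
	struct rt_class *cl = get_mindl(cls, 3, 200);

	printf("%s\n", cl ? cl->name : "(fall back to link-sharing)");	/* voice */
	return 0;
}
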
cl                213 net/sched/sch_htb.c 	struct htb_class *cl;
cl                224 net/sched/sch_htb.c 	cl = htb_find(skb->priority, sch);
cl                225 net/sched/sch_htb.c 	if (cl) {
cl                226 net/sched/sch_htb.c 		if (cl->level == 0)
cl                227 net/sched/sch_htb.c 			return cl;
cl                229 net/sched/sch_htb.c 		tcf = rcu_dereference_bh(cl->filter_list);
cl                247 net/sched/sch_htb.c 		cl = (void *)res.class;
cl                248 net/sched/sch_htb.c 		if (!cl) {
cl                251 net/sched/sch_htb.c 			cl = htb_find(res.classid, sch);
cl                252 net/sched/sch_htb.c 			if (!cl)
cl                255 net/sched/sch_htb.c 		if (!cl->level)
cl                256 net/sched/sch_htb.c 			return cl;	/* we hit leaf; return it */
cl                259 net/sched/sch_htb.c 		tcf = rcu_dereference_bh(cl->filter_list);
cl                262 net/sched/sch_htb.c 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
cl                263 net/sched/sch_htb.c 	if (!cl || cl->level)
cl                265 net/sched/sch_htb.c 	return cl;
cl                275 net/sched/sch_htb.c 			       struct htb_class *cl, int prio)
cl                284 net/sched/sch_htb.c 		if (cl->common.classid > c->common.classid)
cl                289 net/sched/sch_htb.c 	rb_link_node(&cl->node[prio], parent, p);
cl                290 net/sched/sch_htb.c 	rb_insert_color(&cl->node[prio], root);
cl                301 net/sched/sch_htb.c 				 struct htb_class *cl, s64 delay)
cl                303 net/sched/sch_htb.c 	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
cl                305 net/sched/sch_htb.c 	cl->pq_key = q->now + delay;
cl                306 net/sched/sch_htb.c 	if (cl->pq_key == q->now)
cl                307 net/sched/sch_htb.c 		cl->pq_key++;
cl                310 net/sched/sch_htb.c 	if (q->near_ev_cache[cl->level] > cl->pq_key)
cl                311 net/sched/sch_htb.c 		q->near_ev_cache[cl->level] = cl->pq_key;
cl                317 net/sched/sch_htb.c 		if (cl->pq_key >= c->pq_key)
cl                322 net/sched/sch_htb.c 	rb_link_node(&cl->pq_node, parent, p);
cl                323 net/sched/sch_htb.c 	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
cl                344 net/sched/sch_htb.c 					struct htb_class *cl, int mask)
cl                346 net/sched/sch_htb.c 	q->row_mask[cl->level] |= mask;
cl                350 net/sched/sch_htb.c 		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
cl                373 net/sched/sch_htb.c 						 struct htb_class *cl, int mask)
cl                376 net/sched/sch_htb.c 	struct htb_level *hlevel = &q->hlevel[cl->level];
cl                383 net/sched/sch_htb.c 		if (hprio->ptr == cl->node + prio)
cl                386 net/sched/sch_htb.c 		htb_safe_rb_erase(cl->node + prio, &hprio->row);
cl                390 net/sched/sch_htb.c 	q->row_mask[cl->level] &= ~m;
cl                400 net/sched/sch_htb.c static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
cl                402 net/sched/sch_htb.c 	struct htb_class *p = cl->parent;
cl                403 net/sched/sch_htb.c 	long m, mask = cl->prio_activity;
cl                405 net/sched/sch_htb.c 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
cl                417 net/sched/sch_htb.c 			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
cl                420 net/sched/sch_htb.c 		cl = p;
cl                421 net/sched/sch_htb.c 		p = cl->parent;
cl                424 net/sched/sch_htb.c 	if (cl->cmode == HTB_CAN_SEND && mask)
cl                425 net/sched/sch_htb.c 		htb_add_class_to_row(q, cl, mask);
cl                435 net/sched/sch_htb.c static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
cl                437 net/sched/sch_htb.c 	struct htb_class *p = cl->parent;
cl                438 net/sched/sch_htb.c 	long m, mask = cl->prio_activity;
cl                440 net/sched/sch_htb.c 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
cl                447 net/sched/sch_htb.c 			if (p->inner.clprio[prio].ptr == cl->node + prio) {
cl                452 net/sched/sch_htb.c 				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
cl                456 net/sched/sch_htb.c 			htb_safe_rb_erase(cl->node + prio,
cl                464 net/sched/sch_htb.c 		cl = p;
cl                465 net/sched/sch_htb.c 		p = cl->parent;
cl                468 net/sched/sch_htb.c 	if (cl->cmode == HTB_CAN_SEND && mask)
cl                469 net/sched/sch_htb.c 		htb_remove_class_from_row(q, cl, mask);
cl                472 net/sched/sch_htb.c static inline s64 htb_lowater(const struct htb_class *cl)
cl                475 net/sched/sch_htb.c 		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
cl                479 net/sched/sch_htb.c static inline s64 htb_hiwater(const struct htb_class *cl)
cl                482 net/sched/sch_htb.c 		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
cl                500 net/sched/sch_htb.c htb_class_mode(struct htb_class *cl, s64 *diff)
cl                504 net/sched/sch_htb.c 	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
cl                509 net/sched/sch_htb.c 	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
cl                526 net/sched/sch_htb.c htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
cl                528 net/sched/sch_htb.c 	enum htb_cmode new_mode = htb_class_mode(cl, diff);
cl                530 net/sched/sch_htb.c 	if (new_mode == cl->cmode)
cl                534 net/sched/sch_htb.c 		cl->overlimits++;
cl                538 net/sched/sch_htb.c 	if (cl->prio_activity) {	/* not necessary: speed optimization */
cl                539 net/sched/sch_htb.c 		if (cl->cmode != HTB_CANT_SEND)
cl                540 net/sched/sch_htb.c 			htb_deactivate_prios(q, cl);
cl                541 net/sched/sch_htb.c 		cl->cmode = new_mode;
cl                543 net/sched/sch_htb.c 			htb_activate_prios(q, cl);
cl                545 net/sched/sch_htb.c 		cl->cmode = new_mode;
cl                555 net/sched/sch_htb.c static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
cl                557 net/sched/sch_htb.c 	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
cl                559 net/sched/sch_htb.c 	if (!cl->prio_activity) {
cl                560 net/sched/sch_htb.c 		cl->prio_activity = 1 << cl->prio;
cl                561 net/sched/sch_htb.c 		htb_activate_prios(q, cl);
cl                571 net/sched/sch_htb.c static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
cl                573 net/sched/sch_htb.c 	WARN_ON(!cl->prio_activity);
cl                575 net/sched/sch_htb.c 	htb_deactivate_prios(q, cl);
cl                576 net/sched/sch_htb.c 	cl->prio_activity = 0;
cl                585 net/sched/sch_htb.c 	struct htb_class *cl = htb_classify(skb, sch, &ret);
cl                587 net/sched/sch_htb.c 	if (cl == HTB_DIRECT) {
cl                596 net/sched/sch_htb.c 	} else if (!cl) {
cl                602 net/sched/sch_htb.c 	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
cl                606 net/sched/sch_htb.c 			cl->drops++;
cl                610 net/sched/sch_htb.c 		htb_activate(q, cl);
cl                618 net/sched/sch_htb.c static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
cl                620 net/sched/sch_htb.c 	s64 toks = diff + cl->tokens;
cl                622 net/sched/sch_htb.c 	if (toks > cl->buffer)
cl                623 net/sched/sch_htb.c 		toks = cl->buffer;
cl                624 net/sched/sch_htb.c 	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
cl                625 net/sched/sch_htb.c 	if (toks <= -cl->mbuffer)
cl                626 net/sched/sch_htb.c 		toks = 1 - cl->mbuffer;
cl                628 net/sched/sch_htb.c 	cl->tokens = toks;
cl                631 net/sched/sch_htb.c static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
cl                633 net/sched/sch_htb.c 	s64 toks = diff + cl->ctokens;
cl                635 net/sched/sch_htb.c 	if (toks > cl->cbuffer)
cl                636 net/sched/sch_htb.c 		toks = cl->cbuffer;
cl                637 net/sched/sch_htb.c 	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
cl                638 net/sched/sch_htb.c 	if (toks <= -cl->mbuffer)
cl                639 net/sched/sch_htb.c 		toks = 1 - cl->mbuffer;
cl                641 net/sched/sch_htb.c 	cl->ctokens = toks;
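
htb_accnt_tokens() and htb_accnt_ctokens() above are the same token-bucket step applied against two different rates: credit the elapsed time diff, cap the balance at the burst buffer, charge the packet's transmission time, and floor at 1 - mbuffer so a class can never sink arbitrarily far into debt. A standalone model of that step follows; the fixed rate_ns_per_byte is an illustrative stand-in for psched_l2t_ns() and its precomputed rate table.

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;

struct htb_tokens {
	s64 tokens;		/* credit, in ns of transmission time */
	s64 buffer;		/* burst cap, ns */
	s64 mbuffer;		/* maximum debt window, ns */
	s64 rate_ns_per_byte;	/* stand-in for the psched rate table */
};

static void accnt_tokens(struct htb_tokens *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;		/* cap the accumulated burst */
	toks -= bytes * cl->rate_ns_per_byte;	/* charge this packet */
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;		/* bound how far into debt */
	cl->tokens = toks;
}

int main(void)
{
	/* ~1 Mbyte/s, i.e. 1000 ns per byte, with a 10 ms burst buffer. */
	struct htb_tokens cl = {
		.tokens = 0, .buffer = 10000000,
		.mbuffer = 60000000000LL, .rate_ns_per_byte = 1000,
	};

	accnt_tokens(&cl, 1500, 2000000);	/* 2 ms elapsed, 1500 B sent */
	printf("tokens=%lld ns\n", (long long)cl.tokens);	/* 500000 */
	return 0;
}
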
cl                655 net/sched/sch_htb.c static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
cl                662 net/sched/sch_htb.c 	while (cl) {
cl                663 net/sched/sch_htb.c 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
cl                664 net/sched/sch_htb.c 		if (cl->level >= level) {
cl                665 net/sched/sch_htb.c 			if (cl->level == level)
cl                666 net/sched/sch_htb.c 				cl->xstats.lends++;
cl                667 net/sched/sch_htb.c 			htb_accnt_tokens(cl, bytes, diff);
cl                669 net/sched/sch_htb.c 			cl->xstats.borrows++;
cl                670 net/sched/sch_htb.c 			cl->tokens += diff;	/* we moved t_c; update tokens */
cl                672 net/sched/sch_htb.c 		htb_accnt_ctokens(cl, bytes, diff);
cl                673 net/sched/sch_htb.c 		cl->t_c = q->now;
cl                675 net/sched/sch_htb.c 		old_mode = cl->cmode;
cl                677 net/sched/sch_htb.c 		htb_change_class_mode(q, cl, &diff);
cl                678 net/sched/sch_htb.c 		if (old_mode != cl->cmode) {
cl                680 net/sched/sch_htb.c 				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
cl                681 net/sched/sch_htb.c 			if (cl->cmode != HTB_CAN_SEND)
cl                682 net/sched/sch_htb.c 				htb_add_to_wait_tree(q, cl, diff);
cl                686 net/sched/sch_htb.c 		if (cl->level)
cl                687 net/sched/sch_htb.c 			bstats_update(&cl->bstats, skb);
cl                689 net/sched/sch_htb.c 		cl = cl->parent;
cl                711 net/sched/sch_htb.c 		struct htb_class *cl;
cl                718 net/sched/sch_htb.c 		cl = rb_entry(p, struct htb_class, pq_node);
cl                719 net/sched/sch_htb.c 		if (cl->pq_key > q->now)
cl                720 net/sched/sch_htb.c 			return cl->pq_key;
cl                723 net/sched/sch_htb.c 		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
cl                724 net/sched/sch_htb.c 		htb_change_class_mode(q, cl, &diff);
cl                725 net/sched/sch_htb.c 		if (cl->cmode != HTB_CAN_SEND)
cl                726 net/sched/sch_htb.c 			htb_add_to_wait_tree(q, cl, diff);
cl                746 net/sched/sch_htb.c 		struct htb_class *cl =
cl                749 net/sched/sch_htb.c 		if (id > cl->common.classid) {
cl                751 net/sched/sch_htb.c 		} else if (id < cl->common.classid) {
cl                804 net/sched/sch_htb.c 			struct htb_class *cl;
cl                807 net/sched/sch_htb.c 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
cl                808 net/sched/sch_htb.c 			if (!cl->level)
cl                809 net/sched/sch_htb.c 				return cl;
cl                810 net/sched/sch_htb.c 			clp = &cl->inner.clprio[prio];
cl                827 net/sched/sch_htb.c 	struct htb_class *cl, *start;
cl                832 net/sched/sch_htb.c 	start = cl = htb_lookup_leaf(hprio, prio);
cl                836 net/sched/sch_htb.c 		if (unlikely(!cl))
cl                844 net/sched/sch_htb.c 		if (unlikely(cl->leaf.q->q.qlen == 0)) {
cl                846 net/sched/sch_htb.c 			htb_deactivate(q, cl);
cl                854 net/sched/sch_htb.c 			if (cl == start)	/* fix start if we just deleted it */
cl                856 net/sched/sch_htb.c 			cl = next;
cl                860 net/sched/sch_htb.c 		skb = cl->leaf.q->dequeue(cl->leaf.q);
cl                864 net/sched/sch_htb.c 		qdisc_warn_nonwc("htb", cl->leaf.q);
cl                865 net/sched/sch_htb.c 		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
cl                867 net/sched/sch_htb.c 		cl = htb_lookup_leaf(hprio, prio);
cl                869 net/sched/sch_htb.c 	} while (cl != start);
cl                872 net/sched/sch_htb.c 		bstats_update(&cl->bstats, skb);
cl                873 net/sched/sch_htb.c 		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
cl                874 net/sched/sch_htb.c 		if (cl->leaf.deficit[level] < 0) {
cl                875 net/sched/sch_htb.c 			cl->leaf.deficit[level] += cl->quantum;
cl                876 net/sched/sch_htb.c 			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
cl                882 net/sched/sch_htb.c 		if (!cl->leaf.q->q.qlen)
cl                883 net/sched/sch_htb.c 			htb_deactivate(q, cl);
cl                884 net/sched/sch_htb.c 		htb_charge_class(q, cl, level, skb);
cl                952 net/sched/sch_htb.c 	struct htb_class *cl;
cl                956 net/sched/sch_htb.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl                957 net/sched/sch_htb.c 			if (cl->level)
cl                958 net/sched/sch_htb.c 				memset(&cl->inner, 0, sizeof(cl->inner));
cl                960 net/sched/sch_htb.c 				if (cl->leaf.q)
cl                961 net/sched/sch_htb.c 					qdisc_reset(cl->leaf.q);
cl                963 net/sched/sch_htb.c 			cl->prio_activity = 0;
cl                964 net/sched/sch_htb.c 			cl->cmode = HTB_CAN_SEND;
cl               1077 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1084 net/sched/sch_htb.c 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
cl               1085 net/sched/sch_htb.c 	tcm->tcm_handle = cl->common.classid;
cl               1086 net/sched/sch_htb.c 	if (!cl->level && cl->leaf.q)
cl               1087 net/sched/sch_htb.c 		tcm->tcm_info = cl->leaf.q->handle;
cl               1095 net/sched/sch_htb.c 	psched_ratecfg_getrate(&opt.rate, &cl->rate);
cl               1096 net/sched/sch_htb.c 	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
cl               1097 net/sched/sch_htb.c 	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
cl               1098 net/sched/sch_htb.c 	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
cl               1099 net/sched/sch_htb.c 	opt.quantum = cl->quantum;
cl               1100 net/sched/sch_htb.c 	opt.prio = cl->prio;
cl               1101 net/sched/sch_htb.c 	opt.level = cl->level;
cl               1104 net/sched/sch_htb.c 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
cl               1105 net/sched/sch_htb.c 	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
cl               1108 net/sched/sch_htb.c 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
cl               1109 net/sched/sch_htb.c 	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
cl               1123 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1125 net/sched/sch_htb.c 		.drops = cl->drops,
cl               1126 net/sched/sch_htb.c 		.overlimits = cl->overlimits,
cl               1130 net/sched/sch_htb.c 	if (!cl->level && cl->leaf.q)
cl               1131 net/sched/sch_htb.c 		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
cl               1133 net/sched/sch_htb.c 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
cl               1135 net/sched/sch_htb.c 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
cl               1139 net/sched/sch_htb.c 				  d, NULL, &cl->bstats) < 0 ||
cl               1140 net/sched/sch_htb.c 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
cl               1144 net/sched/sch_htb.c 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
cl               1150 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1152 net/sched/sch_htb.c 	if (cl->level)
cl               1156 net/sched/sch_htb.c 				     cl->common.classid, extack)) == NULL)
cl               1159 net/sched/sch_htb.c 	*old = qdisc_replace(sch, new, &cl->leaf.q);
cl               1165 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1166 net/sched/sch_htb.c 	return !cl->level ? cl->leaf.q : NULL;
cl               1171 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1173 net/sched/sch_htb.c 	htb_deactivate(qdisc_priv(sch), cl);
cl               1176 net/sched/sch_htb.c static inline int htb_parent_last_child(struct htb_class *cl)
cl               1178 net/sched/sch_htb.c 	if (!cl->parent)
cl               1181 net/sched/sch_htb.c 	if (cl->parent->children > 1)
cl               1187 net/sched/sch_htb.c static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
cl               1190 net/sched/sch_htb.c 	struct htb_class *parent = cl->parent;
cl               1192 net/sched/sch_htb.c 	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
cl               1207 net/sched/sch_htb.c static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
cl               1209 net/sched/sch_htb.c 	if (!cl->level) {
cl               1210 net/sched/sch_htb.c 		WARN_ON(!cl->leaf.q);
cl               1211 net/sched/sch_htb.c 		qdisc_put(cl->leaf.q);
cl               1213 net/sched/sch_htb.c 	gen_kill_estimator(&cl->rate_est);
cl               1214 net/sched/sch_htb.c 	tcf_block_put(cl->block);
cl               1215 net/sched/sch_htb.c 	kfree(cl);
cl               1222 net/sched/sch_htb.c 	struct htb_class *cl;
cl               1235 net/sched/sch_htb.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl               1236 net/sched/sch_htb.c 			tcf_block_put(cl->block);
cl               1237 net/sched/sch_htb.c 			cl->block = NULL;
cl               1241 net/sched/sch_htb.c 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
cl               1243 net/sched/sch_htb.c 			htb_destroy_class(sch, cl);
cl               1252 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1260 net/sched/sch_htb.c 	if (cl->children || cl->filter_cnt)
cl               1263 net/sched/sch_htb.c 	if (!cl->level && htb_parent_last_child(cl)) {
cl               1265 net/sched/sch_htb.c 					  cl->parent->common.classid,
cl               1272 net/sched/sch_htb.c 	if (!cl->level)
cl               1273 net/sched/sch_htb.c 		qdisc_purge_queue(cl->leaf.q);
cl               1276 net/sched/sch_htb.c 	qdisc_class_hash_remove(&q->clhash, &cl->common);
cl               1277 net/sched/sch_htb.c 	if (cl->parent)
cl               1278 net/sched/sch_htb.c 		cl->parent->children--;
cl               1280 net/sched/sch_htb.c 	if (cl->prio_activity)
cl               1281 net/sched/sch_htb.c 		htb_deactivate(q, cl);
cl               1283 net/sched/sch_htb.c 	if (cl->cmode != HTB_CAN_SEND)
cl               1284 net/sched/sch_htb.c 		htb_safe_rb_erase(&cl->pq_node,
cl               1285 net/sched/sch_htb.c 				  &q->hlevel[cl->level].wait_pq);
cl               1288 net/sched/sch_htb.c 		htb_parent_to_leaf(q, cl, new_q);
cl               1292 net/sched/sch_htb.c 	htb_destroy_class(sch, cl);
cl               1302 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
cl               1338 net/sched/sch_htb.c 	if (!cl) {		/* new class */
cl               1367 net/sched/sch_htb.c 		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
cl               1368 net/sched/sch_htb.c 		if (!cl)
cl               1371 net/sched/sch_htb.c 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
cl               1373 net/sched/sch_htb.c 			kfree(cl);
cl               1377 net/sched/sch_htb.c 			err = gen_new_estimator(&cl->bstats, NULL,
cl               1378 net/sched/sch_htb.c 						&cl->rate_est,
cl               1383 net/sched/sch_htb.c 				tcf_block_put(cl->block);
cl               1384 net/sched/sch_htb.c 				kfree(cl);
cl               1389 net/sched/sch_htb.c 		cl->children = 0;
cl               1390 net/sched/sch_htb.c 		RB_CLEAR_NODE(&cl->pq_node);
cl               1393 net/sched/sch_htb.c 			RB_CLEAR_NODE(&cl->node[prio]);
cl               1419 net/sched/sch_htb.c 		cl->leaf.q = new_q ? new_q : &noop_qdisc;
cl               1421 net/sched/sch_htb.c 		cl->common.classid = classid;
cl               1422 net/sched/sch_htb.c 		cl->parent = parent;
cl               1425 net/sched/sch_htb.c 		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl               1426 net/sched/sch_htb.c 		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
cl               1427 net/sched/sch_htb.c 		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
cl               1428 net/sched/sch_htb.c 		cl->t_c = ktime_get_ns();
cl               1429 net/sched/sch_htb.c 		cl->cmode = HTB_CAN_SEND;
cl               1432 net/sched/sch_htb.c 		qdisc_class_hash_insert(&q->clhash, &cl->common);
cl               1435 net/sched/sch_htb.c 		if (cl->leaf.q != &noop_qdisc)
cl               1436 net/sched/sch_htb.c 			qdisc_hash_add(cl->leaf.q, true);
cl               1439 net/sched/sch_htb.c 			err = gen_replace_estimator(&cl->bstats, NULL,
cl               1440 net/sched/sch_htb.c 						    &cl->rate_est,
cl               1454 net/sched/sch_htb.c 	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
cl               1455 net/sched/sch_htb.c 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
cl               1460 net/sched/sch_htb.c 	if (!cl->level) {
cl               1461 net/sched/sch_htb.c 		u64 quantum = cl->rate.rate_bytes_ps;
cl               1464 net/sched/sch_htb.c 		cl->quantum = min_t(u64, quantum, INT_MAX);
cl               1466 net/sched/sch_htb.c 		if (!hopt->quantum && cl->quantum < 1000) {
cl               1468 net/sched/sch_htb.c 			cl->quantum = 1000;
cl               1470 net/sched/sch_htb.c 		if (!hopt->quantum && cl->quantum > 200000) {
cl               1472 net/sched/sch_htb.c 			cl->quantum = 200000;
cl               1475 net/sched/sch_htb.c 			cl->quantum = hopt->quantum;
cl               1476 net/sched/sch_htb.c 		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
cl               1477 net/sched/sch_htb.c 			cl->prio = TC_HTB_NUMPRIO - 1;
cl               1480 net/sched/sch_htb.c 	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl               1481 net/sched/sch_htb.c 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
cl               1488 net/sched/sch_htb.c 			    cl->common.classid, (warn == -1 ? "small" : "big"));
cl               1492 net/sched/sch_htb.c 	*arg = (unsigned long)cl;
cl               1503 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1505 net/sched/sch_htb.c 	return cl ? cl->block : q->block;
cl               1511 net/sched/sch_htb.c 	struct htb_class *cl = htb_find(classid, sch);
cl               1522 net/sched/sch_htb.c 	if (cl)
cl               1523 net/sched/sch_htb.c 		cl->filter_cnt++;
cl               1524 net/sched/sch_htb.c 	return (unsigned long)cl;
cl               1529 net/sched/sch_htb.c 	struct htb_class *cl = (struct htb_class *)arg;
cl               1531 net/sched/sch_htb.c 	if (cl)
cl               1532 net/sched/sch_htb.c 		cl->filter_cnt--;
cl               1538 net/sched/sch_htb.c 	struct htb_class *cl;
cl               1545 net/sched/sch_htb.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl               1550 net/sched/sch_htb.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
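
The sch_htb.c entries above end in htb_change_class(), where a leaf's DRR quantum is derived and clamped (file lines 1461-1475). A minimal sketch of that policy, assuming the divisor is the qdisc's r2q parameter (the division itself is elided from the listing); the helper name is illustrative, not the kernel's:

#include <stdint.h>

/* Quantum policy sketch: with no user-supplied quantum, derive one
 * from the class rate and clamp it so DRR stays accurate (>= 1000
 * bytes) but not bursty (<= 200000 bytes); an explicit user quantum
 * is taken as-is.  Mirrors the checks at sch_htb.c:1464-1475; r2q
 * must be nonzero. */
static uint32_t pick_quantum(uint64_t rate_bytes_ps, unsigned int r2q,
			     uint32_t user_quantum)
{
	uint64_t quantum = rate_bytes_ps / r2q;

	if (quantum > INT32_MAX)
		quantum = INT32_MAX;	/* min_t(u64, quantum, INT_MAX) */
	if (!user_quantum && quantum < 1000)
		quantum = 1000;		/* kernel warns "small" here */
	if (!user_quantum && quantum > 200000)
		quantum = 200000;	/* kernel warns "big" here */
	if (user_quantum)
		quantum = user_quantum;
	return (uint32_t)quantum;
}
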
cl                 39 net/sched/sch_ingress.c static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
cl                 47 net/sched/sch_ingress.c static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                165 net/sched/sch_ingress.c static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                170 net/sched/sch_ingress.c 	switch (cl) {
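
clsact_tcf_block() above hands back one of two filter blocks depending on the class minor. A sketch of that dispatch, with the minors mirroring TC_H_MIN_INGRESS/TC_H_MIN_EGRESS from include/uapi/linux/pkt_sched.h and the stub type standing in for the real qdisc private data:

#include <stddef.h>

#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U

struct clsact_stub {
	void *ingress_block;	/* filters for incoming packets */
	void *egress_block;	/* filters for outgoing packets */
};

static void *clsact_block(struct clsact_stub *q, unsigned long cl)
{
	switch (cl) {
	case TC_H_MIN_INGRESS:
		return q->ingress_block;
	case TC_H_MIN_EGRESS:
		return q->egress_block;
	default:
		return NULL;	/* no such class */
	}
}
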
cl                174 net/sched/sch_mq.c static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
cl                177 net/sched/sch_mq.c 	unsigned long ntx = cl - 1;
cl                190 net/sched/sch_mq.c static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
cl                193 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
cl                207 net/sched/sch_mq.c 	graft_offload.graft_params.queue = cl - 1;
cl                216 net/sched/sch_mq.c static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
cl                218 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
cl                232 net/sched/sch_mq.c static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
cl                235 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
cl                238 net/sched/sch_mq.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                243 net/sched/sch_mq.c static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                246 net/sched/sch_mq.c 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
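
mq_queue_get() above maps a class to a device tx queue with `ntx = cl - 1`. A self-contained sketch of that convention; TC_H_MIN() mirrors the kernel macro (low 16 bits of a handle), while the lookup helper itself is illustrative:

#include <stdint.h>

#define TC_H_MIN(h)	((h) & 0xFFFFU)

/* mq numbers classes 1..num_tx_queues so that minor 0 keeps its
 * usual "no class" meaning; subtracting one recovers the queue
 * index, and unsigned wraparound makes minor 0 fail the bound
 * check just like an oversized minor does. */
static long classid_to_txq(uint32_t classid, unsigned long num_tx_queues)
{
	unsigned long ntx = TC_H_MIN(classid) - 1;	/* ntx = cl - 1 */

	return ntx < num_tx_queues ? (long)ntx : -1;
}
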
cl                310 net/sched/sch_mqprio.c 					     unsigned long cl)
cl                313 net/sched/sch_mqprio.c 	unsigned long ntx = cl - 1;
cl                320 net/sched/sch_mqprio.c static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
cl                324 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
cl                459 net/sched/sch_mqprio.c static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
cl                461 net/sched/sch_mqprio.c 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
cl                488 net/sched/sch_mqprio.c static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
cl                491 net/sched/sch_mqprio.c 	if (cl < TC_H_MIN_PRIORITY) {
cl                492 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
cl                494 net/sched/sch_mqprio.c 		int tc = netdev_txq_to_tc(dev, cl - 1);
cl                504 net/sched/sch_mqprio.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                508 net/sched/sch_mqprio.c static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                513 net/sched/sch_mqprio.c 	if (cl >= TC_H_MIN_PRIORITY) {
cl                519 net/sched/sch_mqprio.c 		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
cl                558 net/sched/sch_mqprio.c 		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
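
mqprio_dump_class() and mqprio_dump_class_stats() above branch on the minor: small minors are per-queue classes, large ones are per-traffic-class handles. A sketch of the split, with constants mirroring include/uapi/linux/pkt_sched.h:

#define TC_H_MIN_PRIORITY	0xFFE0U	/* first per-TC minor */
#define TC_BITMASK		15

/* Minors below TC_H_MIN_PRIORITY address single tx queues (hence
 * the cl - 1 indexing shared with mq); minors at or above it
 * address whole traffic classes, picked out of tc_to_txq[] by the
 * low TC bits. */
static int is_per_queue_class(unsigned long cl)
{
	return cl < TC_H_MIN_PRIORITY;
}

static unsigned int class_to_tc(unsigned long cl)
{
	return cl & TC_BITMASK;
}
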
cl                320 net/sched/sch_multiq.c static void multiq_unbind(struct Qdisc *q, unsigned long cl)
cl                324 net/sched/sch_multiq.c static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
cl                329 net/sched/sch_multiq.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                330 net/sched/sch_multiq.c 	tcm->tcm_info = q->queues[cl - 1]->handle;
cl                334 net/sched/sch_multiq.c static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                340 net/sched/sch_multiq.c 	cl_q = q->queues[cl - 1];
cl                370 net/sched/sch_multiq.c static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                375 net/sched/sch_multiq.c 	if (cl)
cl               1213 net/sched/sch_netem.c static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
cl               1218 net/sched/sch_netem.c 	if (cl != 1 || !q->qdisc) 	/* only one class */
cl                343 net/sched/sch_prio.c static void prio_unbind(struct Qdisc *q, unsigned long cl)
cl                347 net/sched/sch_prio.c static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
cl                352 net/sched/sch_prio.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                353 net/sched/sch_prio.c 	tcm->tcm_info = q->queues[cl-1]->handle;
cl                357 net/sched/sch_prio.c static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                363 net/sched/sch_prio.c 	cl_q = q->queues[cl - 1];
cl                393 net/sched/sch_prio.c static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                398 net/sched/sch_prio.c 	if (cl)
cl                313 net/sched/sch_qfq.c 			   struct qfq_class *cl)
cl                315 net/sched/sch_qfq.c 	cl->agg = agg;
cl                318 net/sched/sch_qfq.c 	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
cl                319 net/sched/sch_qfq.c 		list_add_tail(&cl->alist, &agg->active);
cl                321 net/sched/sch_qfq.c 		    cl && q->in_serv_agg != agg) /* agg was inactive */
cl                341 net/sched/sch_qfq.c static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
cl                343 net/sched/sch_qfq.c 	struct qfq_aggregate *agg = cl->agg;
cl                346 net/sched/sch_qfq.c 	list_del(&cl->alist); /* remove from RR queue of the aggregate */
cl                352 net/sched/sch_qfq.c static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
cl                354 net/sched/sch_qfq.c 	struct qfq_aggregate *agg = cl->agg;
cl                356 net/sched/sch_qfq.c 	cl->agg = NULL;
cl                365 net/sched/sch_qfq.c static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
cl                367 net/sched/sch_qfq.c 	if (cl->qdisc->q.qlen > 0) /* class is active */
cl                368 net/sched/sch_qfq.c 		qfq_deactivate_class(q, cl);
cl                370 net/sched/sch_qfq.c 	qfq_rm_from_agg(q, cl);
cl                374 net/sched/sch_qfq.c static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
cl                386 net/sched/sch_qfq.c 	qfq_deact_rm_from_agg(q, cl);
cl                387 net/sched/sch_qfq.c 	qfq_add_to_agg(q, new_agg, cl);
cl                397 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)*arg;
cl                436 net/sched/sch_qfq.c 	if (cl != NULL &&
cl                437 net/sched/sch_qfq.c 	    lmax == cl->agg->lmax &&
cl                438 net/sched/sch_qfq.c 	    weight == cl->agg->class_weight)
cl                441 net/sched/sch_qfq.c 	delta_w = weight - (cl ? cl->agg->class_weight : 0);
cl                449 net/sched/sch_qfq.c 	if (cl != NULL) { /* modify existing class */
cl                451 net/sched/sch_qfq.c 			err = gen_replace_estimator(&cl->bstats, NULL,
cl                452 net/sched/sch_qfq.c 						    &cl->rate_est,
cl                464 net/sched/sch_qfq.c 	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
cl                465 net/sched/sch_qfq.c 	if (cl == NULL)
cl                468 net/sched/sch_qfq.c 	cl->common.classid = classid;
cl                469 net/sched/sch_qfq.c 	cl->deficit = lmax;
cl                471 net/sched/sch_qfq.c 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
cl                473 net/sched/sch_qfq.c 	if (cl->qdisc == NULL)
cl                474 net/sched/sch_qfq.c 		cl->qdisc = &noop_qdisc;
cl                477 net/sched/sch_qfq.c 		err = gen_new_estimator(&cl->bstats, NULL,
cl                478 net/sched/sch_qfq.c 					&cl->rate_est,
cl                486 net/sched/sch_qfq.c 	if (cl->qdisc != &noop_qdisc)
cl                487 net/sched/sch_qfq.c 		qdisc_hash_add(cl->qdisc, true);
cl                489 net/sched/sch_qfq.c 	qdisc_class_hash_insert(&q->clhash, &cl->common);
cl                502 net/sched/sch_qfq.c 			gen_kill_estimator(&cl->rate_est);
cl                509 net/sched/sch_qfq.c 		qfq_deact_rm_from_agg(q, cl);
cl                510 net/sched/sch_qfq.c 	qfq_add_to_agg(q, new_agg, cl);
cl                513 net/sched/sch_qfq.c 	*arg = (unsigned long)cl;
cl                517 net/sched/sch_qfq.c 	qdisc_put(cl->qdisc);
cl                518 net/sched/sch_qfq.c 	kfree(cl);
cl                522 net/sched/sch_qfq.c static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
cl                526 net/sched/sch_qfq.c 	qfq_rm_from_agg(q, cl);
cl                527 net/sched/sch_qfq.c 	gen_kill_estimator(&cl->rate_est);
cl                528 net/sched/sch_qfq.c 	qdisc_put(cl->qdisc);
cl                529 net/sched/sch_qfq.c 	kfree(cl);
cl                535 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                537 net/sched/sch_qfq.c 	if (cl->filter_cnt > 0)
cl                542 net/sched/sch_qfq.c 	qdisc_purge_queue(cl->qdisc);
cl                543 net/sched/sch_qfq.c 	qdisc_class_hash_remove(&q->clhash, &cl->common);
cl                547 net/sched/sch_qfq.c 	qfq_destroy_class(sch, cl);
cl                556 net/sched/sch_qfq.c static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                561 net/sched/sch_qfq.c 	if (cl)
cl                570 net/sched/sch_qfq.c 	struct qfq_class *cl = qfq_find_class(sch, classid);
cl                572 net/sched/sch_qfq.c 	if (cl != NULL)
cl                573 net/sched/sch_qfq.c 		cl->filter_cnt++;
cl                575 net/sched/sch_qfq.c 	return (unsigned long)cl;
cl                580 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                582 net/sched/sch_qfq.c 	cl->filter_cnt--;
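
The bind/unbind pairs above (qfq here, htb earlier) implement one lifetime rule: each classifier bound to a class holds a count, and qfq_delete_class() at file lines 535-537 refuses deletion while the count is nonzero. A reduced sketch of that rule, with the type trimmed to the one field involved:

struct class_stub {
	unsigned int filter_cnt;	/* classifiers bound to this class */
};

static void bind_tcf(struct class_stub *cl)   { cl->filter_cnt++; }
static void unbind_tcf(struct class_stub *cl) { cl->filter_cnt--; }

/* Deletion is refused while any filter still resolves to the class,
 * so a classifier can never return a pointer to freed memory. */
static int may_delete(const struct class_stub *cl)
{
	return cl->filter_cnt == 0;
}
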
cl                589 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                593 net/sched/sch_qfq.c 					cl->common.classid, NULL);
cl                598 net/sched/sch_qfq.c 	*old = qdisc_replace(sch, new, &cl->qdisc);
cl                604 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                606 net/sched/sch_qfq.c 	return cl->qdisc;
cl                612 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                616 net/sched/sch_qfq.c 	tcm->tcm_handle	= cl->common.classid;
cl                617 net/sched/sch_qfq.c 	tcm->tcm_info	= cl->qdisc->handle;
cl                622 net/sched/sch_qfq.c 	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
cl                623 net/sched/sch_qfq.c 	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
cl                635 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl                640 net/sched/sch_qfq.c 	xstats.weight = cl->agg->class_weight;
cl                641 net/sched/sch_qfq.c 	xstats.lmax = cl->agg->lmax;
cl                644 net/sched/sch_qfq.c 				  d, NULL, &cl->bstats) < 0 ||
cl                645 net/sched/sch_qfq.c 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
cl                646 net/sched/sch_qfq.c 	    qdisc_qstats_copy(d, cl->qdisc) < 0)
cl                655 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl                662 net/sched/sch_qfq.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl                667 net/sched/sch_qfq.c 			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
cl                680 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl                687 net/sched/sch_qfq.c 		cl = qfq_find_class(sch, skb->priority);
cl                688 net/sched/sch_qfq.c 		if (cl != NULL)
cl                689 net/sched/sch_qfq.c 			return cl;
cl                707 net/sched/sch_qfq.c 		cl = (struct qfq_class *)res.class;
cl                708 net/sched/sch_qfq.c 		if (cl == NULL)
cl                709 net/sched/sch_qfq.c 			cl = qfq_find_class(sch, res.classid);
cl                710 net/sched/sch_qfq.c 		return cl;
cl                975 net/sched/sch_qfq.c 			struct qfq_class *cl, unsigned int len)
cl                977 net/sched/sch_qfq.c 	qdisc_dequeue_peeked(cl->qdisc);
cl                979 net/sched/sch_qfq.c 	cl->deficit -= (int) len;
cl                981 net/sched/sch_qfq.c 	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
cl                982 net/sched/sch_qfq.c 		list_del(&cl->alist);
cl                983 net/sched/sch_qfq.c 	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
cl                984 net/sched/sch_qfq.c 		cl->deficit += agg->lmax;
cl                985 net/sched/sch_qfq.c 		list_move_tail(&cl->alist, &agg->active);
cl                990 net/sched/sch_qfq.c 					   struct qfq_class **cl,
cl                995 net/sched/sch_qfq.c 	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
cl                996 net/sched/sch_qfq.c 	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
cl               1081 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl               1090 net/sched/sch_qfq.c 		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
cl               1127 net/sched/sch_qfq.c 		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
cl               1136 net/sched/sch_qfq.c 	agg_dequeue(in_serv_agg, cl, len);
cl               1202 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl               1207 net/sched/sch_qfq.c 	cl = qfq_classify(skb, sch, &err);
cl               1208 net/sched/sch_qfq.c 	if (cl == NULL) {
cl               1214 net/sched/sch_qfq.c 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
cl               1216 net/sched/sch_qfq.c 	if (unlikely(cl->agg->lmax < len)) {
cl               1218 net/sched/sch_qfq.c 			 cl->agg->lmax, len, cl->common.classid);
cl               1219 net/sched/sch_qfq.c 		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
cl               1221 net/sched/sch_qfq.c 			cl->qstats.drops++;
cl               1227 net/sched/sch_qfq.c 	first = !cl->qdisc->q.qlen;
cl               1228 net/sched/sch_qfq.c 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
cl               1232 net/sched/sch_qfq.c 			cl->qstats.drops++;
cl               1238 net/sched/sch_qfq.c 	cl->bstats.bytes += len;
cl               1239 net/sched/sch_qfq.c 	cl->bstats.packets += gso_segs;
cl               1243 net/sched/sch_qfq.c 	agg = cl->agg;
cl               1246 net/sched/sch_qfq.c 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
cl               1248 net/sched/sch_qfq.c 		    == cl && cl->deficit < len)
cl               1249 net/sched/sch_qfq.c 			list_move_tail(&cl->alist, &agg->active);
cl               1255 net/sched/sch_qfq.c 	cl->deficit = agg->lmax;
cl               1256 net/sched/sch_qfq.c 	list_add_tail(&cl->alist, &agg->active);
cl               1258 net/sched/sch_qfq.c 	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
cl               1405 net/sched/sch_qfq.c 	struct qfq_class *cl = (struct qfq_class *)arg;
cl               1407 net/sched/sch_qfq.c 	qfq_deactivate_class(q, cl);
cl               1454 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl               1458 net/sched/sch_qfq.c 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
cl               1459 net/sched/sch_qfq.c 			if (cl->qdisc->q.qlen > 0)
cl               1460 net/sched/sch_qfq.c 				qfq_deactivate_class(q, cl);
cl               1462 net/sched/sch_qfq.c 			qdisc_reset(cl->qdisc);
cl               1472 net/sched/sch_qfq.c 	struct qfq_class *cl;
cl               1479 net/sched/sch_qfq.c 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
cl               1481 net/sched/sch_qfq.c 			qfq_destroy_class(sch, cl);
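
agg_dequeue() in the sch_qfq.c entries above (file lines 975-985) is plain deficit round robin inside an aggregate. A sketch of the accounting step, with the intrusive list and packet peeking stubbed out; only the deficit arithmetic is taken from the listing:

enum drr_action { DRR_STAY, DRR_LEAVE, DRR_ROTATE };

struct drr_class {
	int deficit;	/* bytes this class may still send this round */
};

static enum drr_action drr_after_send(struct drr_class *cl, int sent_len,
				      int next_len, int lmax, int queue_empty)
{
	cl->deficit -= sent_len;

	if (queue_empty)
		return DRR_LEAVE;	/* list_del(&cl->alist) */
	if (cl->deficit < next_len) {
		cl->deficit += lmax;	/* recharge by the max packet size */
		return DRR_ROTATE;	/* list_move_tail() to round's end */
	}
	return DRR_STAY;		/* head of line can still afford it */
}
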
cl                357 net/sched/sch_red.c static int red_dump_class(struct Qdisc *sch, unsigned long cl,
cl                611 net/sched/sch_sfb.c static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
cl                652 net/sched/sch_sfb.c static int sfb_delete(struct Qdisc *sch, unsigned long cl)
cl                669 net/sched/sch_sfb.c static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                674 net/sched/sch_sfb.c 	if (cl)
cl                841 net/sched/sch_sfq.c static void sfq_unbind(struct Qdisc *q, unsigned long cl)
cl                845 net/sched/sch_sfq.c static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
cl                850 net/sched/sch_sfq.c 	if (cl)
cl                855 net/sched/sch_sfq.c static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
cl                858 net/sched/sch_sfq.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                862 net/sched/sch_sfq.c static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                866 net/sched/sch_sfq.c 	sfq_index idx = q->ht[cl - 1];
cl                246 net/sched/sch_skbprio.c static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
cl                249 net/sched/sch_skbprio.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl                253 net/sched/sch_skbprio.c static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl                257 net/sched/sch_skbprio.c 	if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
cl                258 net/sched/sch_skbprio.c 		q->qstats[cl - 1].qlen) < 0)
cl               1674 net/sched/sch_taprio.c 					     unsigned long cl)
cl               1677 net/sched/sch_taprio.c 	unsigned long ntx = cl - 1;
cl               1685 net/sched/sch_taprio.c static int taprio_graft(struct Qdisc *sch, unsigned long cl,
cl               1691 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
cl               1699 net/sched/sch_taprio.c 	*old = q->qdiscs[cl - 1];
cl               1700 net/sched/sch_taprio.c 	q->qdiscs[cl - 1] = new;
cl               1847 net/sched/sch_taprio.c static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
cl               1849 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
cl               1866 net/sched/sch_taprio.c static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
cl               1869 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
cl               1872 net/sched/sch_taprio.c 	tcm->tcm_handle |= TC_H_MIN(cl);
cl               1878 net/sched/sch_taprio.c static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl               1883 net/sched/sch_taprio.c 	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
cl                478 net/sched/sch_tbf.c static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
cl                 78 net/sunrpc/svc_xprt.c 	struct svc_xprt_class *cl;
cl                 86 net/sunrpc/svc_xprt.c 	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
cl                 87 net/sunrpc/svc_xprt.c 		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
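
The svc_xprt.c lines above are the duplicate-name check in transport-class registration: the new class's xcl_name is compared against every class already on svc_xprt_class_list. The same check, reduced to an array walk for illustration (the kernel walks a linked list under a mutex):

#include <string.h>

static int xprt_name_taken(const char *name,
			   const char *const *registered, int n)
{
	for (int i = 0; i < n; i++)
		if (strcmp(name, registered[i]) == 0)
			return 1;	/* already registered */
	return 0;
}
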
cl                101 samples/mei/mei-amt-version.c static void mei_deinit(struct mei *cl)
cl                103 samples/mei/mei-amt-version.c 	if (cl->fd != -1)
cl                104 samples/mei/mei-amt-version.c 		close(cl->fd);
cl                105 samples/mei/mei-amt-version.c 	cl->fd = -1;
cl                106 samples/mei/mei-amt-version.c 	cl->buf_size = 0;
cl                107 samples/mei/mei-amt-version.c 	cl->prot_ver = 0;
cl                108 samples/mei/mei-amt-version.c 	cl->initialized = false;
cl                115 samples/mei/mei-amt-version.c 	struct mei_client *cl;
cl                135 samples/mei/mei-amt-version.c 	cl = &data.out_client_properties;
cl                136 samples/mei/mei-amt-version.c 	mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
cl                137 samples/mei/mei-amt-version.c 	mei_msg(me, "protocol_version %d\n", cl->protocol_version);
cl                140 samples/mei/mei-amt-version.c 	     (cl->protocol_version != req_protocol_version)) {
cl                145 samples/mei/mei-amt-version.c 	me->buf_size = cl->max_msg_length;
cl                146 samples/mei/mei-amt-version.c 	me->prot_ver = cl->protocol_version;
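
The mei-amt-version.c sample above both tears down a client (mei_deinit) and, after IOCTL_MEI_CONNECT_CLIENT succeeds, copies the firmware client's properties into the handle. A sketch assembling the teardown lines into one unit; field types are approximated where the listing does not show them:

#include <stdbool.h>
#include <unistd.h>

struct mei_stub {
	int fd;			/* device file descriptor, -1 when closed */
	unsigned int buf_size;	/* from out_client_properties.max_msg_length */
	unsigned char prot_ver;	/* from out_client_properties.protocol_version */
	bool initialized;
};

static void mei_reset(struct mei_stub *cl)
{
	if (cl->fd != -1)
		close(cl->fd);
	cl->fd = -1;
	cl->buf_size = 0;
	cl->prot_ver = 0;
	cl->initialized = false;
}
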
cl                732 security/apparmor/lsm.c 	struct aa_label *cl, *tl;
cl                739 security/apparmor/lsm.c 		cl = aa_get_newest_cred_label(cred);
cl                741 security/apparmor/lsm.c 		error = aa_may_signal(cl, tl, sig);
cl                742 security/apparmor/lsm.c 		aa_put_label(cl);
cl                747 security/apparmor/lsm.c 	cl = __begin_current_label_crit_section();
cl                749 security/apparmor/lsm.c 	error = aa_may_signal(cl, tl, sig);
cl                751 security/apparmor/lsm.c 	__end_current_label_crit_section(cl);
cl               1929 sound/soc/codecs/rt5682.c 	unsigned int cl, val = 0;
cl               1964 sound/soc/codecs/rt5682.c 		cl = RT5682_I2S1_TX_CHL_8 | RT5682_I2S1_RX_CHL_8;
cl               1968 sound/soc/codecs/rt5682.c 		cl = RT5682_I2S1_TX_CHL_16 | RT5682_I2S1_RX_CHL_16;
cl               1972 sound/soc/codecs/rt5682.c 		cl = RT5682_I2S1_TX_CHL_20 | RT5682_I2S1_RX_CHL_20;
cl               1976 sound/soc/codecs/rt5682.c 		cl = RT5682_I2S1_TX_CHL_24 | RT5682_I2S1_RX_CHL_24;
cl               1980 sound/soc/codecs/rt5682.c 		cl = RT5682_I2S1_TX_CHL_32 | RT5682_I2S1_RX_CHL_32;
cl               1989 sound/soc/codecs/rt5682.c 		RT5682_I2S1_TX_CHL_MASK | RT5682_I2S1_RX_CHL_MASK, cl);
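
The rt5682 lines above map the sample width negotiated in hw_params onto I2S channel-length register bits, applied later under RT5682_I2S1_TX_CHL_MASK | RT5682_I2S1_RX_CHL_MASK. A sketch of the mapping; the field values below are placeholders, since the real RT5682_I2S1_*_CHL_* definitions live in rt5682.h:

/* Placeholder field values, one per supported width. */
#define CHL_8	0
#define CHL_16	1
#define CHL_20	2
#define CHL_24	3
#define CHL_32	4

static int width_to_chl(int width, unsigned int *cl)
{
	switch (width) {
	case 8:	 *cl = CHL_8;  break;
	case 16: *cl = CHL_16; break;
	case 20: *cl = CHL_20; break;
	case 24: *cl = CHL_24; break;
	case 32: *cl = CHL_32; break;
	default: return -1;	/* width the codec cannot clock out */
	}
	return 0;
}
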
cl                687 sound/soc/ti/davinci-mcasp.c 	struct snd_pcm_hw_constraint_list *cl = &mcasp->chconstr[stream];
cl                688 sound/soc/ti/davinci-mcasp.c 	unsigned int *list = (unsigned int *) cl->list;
cl                701 sound/soc/ti/davinci-mcasp.c 	cl->count = count;
cl                 73 tools/perf/arch/x86/util/perf_regs.c 	SDT_NAME_REG(cl,  cx),
cl                158 tools/perf/tests/hists_cumulate.c #define CDSO(cl)  (cl->ms.map->dso->short_name)
cl                159 tools/perf/tests/hists_cumulate.c #define CSYM(cl)  (cl->ms.sym->name)
cl                163 tools/perf/ui/browsers/hists.c static char callchain_list__folded(const struct callchain_list *cl)
cl                165 tools/perf/ui/browsers/hists.c 	return cl->has_children ? tree__folded_sign(cl->unfolded) : ' ';
cl                168 tools/perf/ui/browsers/hists.c static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
cl                170 tools/perf/ui/browsers/hists.c 	cl->unfolded = unfold ? cl->has_children : false;
cl                313 tools/perf/ui/browsers/hists.c static bool callchain_list__toggle_fold(struct callchain_list *cl)
cl                315 tools/perf/ui/browsers/hists.c 	if (!cl)
cl                318 tools/perf/ui/browsers/hists.c 	if (!cl->has_children)
cl                321 tools/perf/ui/browsers/hists.c 	cl->unfolded = !cl->unfolded;
cl                397 tools/perf/ui/browsers/hists.c 	struct callchain_list *cl = container_of(ms, struct callchain_list, ms);
cl                406 tools/perf/ui/browsers/hists.c 		has_children = callchain_list__toggle_fold(cl);
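
The hists-browser lines above fold and unfold callchain entries: only an entry with children can be unfolded, and toggling flips that state. A self-contained sketch; the '+'/'-' signs are assumed from the browser's folded/unfolded convention, which the listing only names via tree__folded_sign():

#include <stdbool.h>
#include <stddef.h>

struct cc_entry {
	bool has_children;
	bool unfolded;
};

static char folded_sign(const struct cc_entry *cl)
{
	if (!cl->has_children)
		return ' ';			/* leaf: nothing to fold */
	return cl->unfolded ? '-' : '+';	/* tree__folded_sign() */
}

static bool toggle_fold(struct cc_entry *cl)
{
	if (!cl || !cl->has_children)
		return false;			/* nothing to toggle */
	cl->unfolded = !cl->unfolded;
	return true;
}
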
cl               1147 tools/perf/util/callchain.c char *callchain_list__sym_name(struct callchain_list *cl,
cl               1154 tools/perf/util/callchain.c 	if (cl->ms.sym) {
cl               1155 tools/perf/util/callchain.c 		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
cl               1157 tools/perf/util/callchain.c 		if (show_srcline && cl->srcline)
cl               1159 tools/perf/util/callchain.c 					    cl->ms.sym->name, cl->srcline,
cl               1163 tools/perf/util/callchain.c 					    cl->ms.sym->name, inlined);
cl               1165 tools/perf/util/callchain.c 		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
cl               1169 tools/perf/util/callchain.c 			  cl->ms.map ?
cl               1170 tools/perf/util/callchain.c 			  cl->ms.map->dso->short_name :
cl                275 tools/perf/util/callchain.h char *callchain_list__sym_name(struct callchain_list *cl,
cl               1106 tools/perf/util/data-convert-bt.c #define ADD_FIELD(cl, t, n)						\
cl               1109 tools/perf/util/data-convert-bt.c 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
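
The listing ends inside data-convert-bt.c's ADD_FIELD(): a do { } while (0) wrapper so the macro behaves as a single statement, with a nonzero return from bt_ctf_event_class_add_field() diverted to error handling. The prototype and the error branch below are assumptions made only to complete the macro's shape; the listing shows just its first three lines:

#include <stdio.h>

/* Assumed babeltrace CTF-writer prototype, declared so the sketch
 * stands alone. */
struct bt_ctf_event_class;
struct bt_ctf_field_type;
int bt_ctf_event_class_add_field(struct bt_ctf_event_class *cl,
				 struct bt_ctf_field_type *t,
				 const char *n);

#define ADD_FIELD(cl, t, n)						\
	do {								\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			fprintf(stderr, "Failed to add field '%s'\n", n); \
			return -1;	/* assumed error path */	\
		}							\
	} while (0)
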