lc                885 arch/ia64/include/asm/sal.h 	u64 lc;			/* Loop Count */
lc                 60 arch/ia64/kernel/entry.h 	.spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off);	\
lc                830 arch/ia64/kernel/ptrace.c 	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
lc                856 arch/ia64/kernel/ptrace.c 	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
lc                878 arch/ia64/kernel/ptrace.c 	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
lc                974 arch/ia64/kernel/ptrace.c 	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
lc               1014 arch/ia64/kernel/ptrace.c 	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
lc               1107 arch/ia64/kernel/ptrace.c 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
lc                 34 arch/nds32/include/asm/ptrace.h 			long lc;
lc                 21 arch/nds32/include/uapi/asm/ptrace.h 	long lc;
lc                198 arch/nds32/kernel/process.c 	childregs->lc = 0;
lc                123 arch/nds32/kernel/signal.c 	__get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
lc                209 arch/nds32/kernel/signal.c 	__put_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
lc                126 arch/powerpc/kernel/io.c 	u32 lc = c;
lc                127 arch/powerpc/kernel/io.c 	lc |= lc << 8;
lc                128 arch/powerpc/kernel/io.c 	lc |= lc << 16;
lc                137 arch/powerpc/kernel/io.c 		*((volatile u32 *)p) = lc;
lc                 97 arch/s390/include/asm/nmi.h void nmi_alloc_boot_cpu(struct lowcore *lc);
lc                 98 arch/s390/include/asm/nmi.h int nmi_alloc_per_cpu(struct lowcore *lc);
lc                 99 arch/s390/include/asm/nmi.h void nmi_free_per_cpu(struct lowcore *lc);
lc                 71 arch/s390/include/asm/processor.h 	struct lowcore *lc = lowcore_ptr[cpu];
lc                 72 arch/s390/include/asm/processor.h 	return !!(lc->cpu_flags & (1UL << flag));
lc                 87 arch/s390/kernel/crash_dump.c 	struct lowcore *lc;
lc                 89 arch/s390/kernel/crash_dump.c 	lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA);
lc                 90 arch/s390/kernel/crash_dump.c 	memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw));
lc                 91 arch/s390/kernel/crash_dump.c 	memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs));
lc                 92 arch/s390/kernel/crash_dump.c 	memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs));
lc                 93 arch/s390/kernel/crash_dump.c 	memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs));
lc                 94 arch/s390/kernel/crash_dump.c 	memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs));
lc                 95 arch/s390/kernel/crash_dump.c 	memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc));
lc                 96 arch/s390/kernel/crash_dump.c 	memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix));
lc                 97 arch/s390/kernel/crash_dump.c 	memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg));
lc                 98 arch/s390/kernel/crash_dump.c 	memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer));
lc                 99 arch/s390/kernel/crash_dump.c 	memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp));
lc                 67 arch/s390/kernel/nmi.c void __init nmi_alloc_boot_cpu(struct lowcore *lc)
lc                 71 arch/s390/kernel/nmi.c 	lc->mcesad = (unsigned long) &boot_mcesa;
lc                 73 arch/s390/kernel/nmi.c 		lc->mcesad |= ilog2(MCESA_MAX_SIZE);
lc                103 arch/s390/kernel/nmi.c int nmi_alloc_per_cpu(struct lowcore *lc)
lc                114 arch/s390/kernel/nmi.c 	lc->mcesad = origin | mcesa_origin_lc;
lc                118 arch/s390/kernel/nmi.c void nmi_free_per_cpu(struct lowcore *lc)
lc                122 arch/s390/kernel/nmi.c 	kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
lc                383 arch/s390/kernel/setup.c 	struct lowcore *lc;
lc                389 arch/s390/kernel/setup.c 	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
lc                390 arch/s390/kernel/setup.c 	if (!lc)
lc                392 arch/s390/kernel/setup.c 		      __func__, sizeof(*lc), sizeof(*lc));
lc                394 arch/s390/kernel/setup.c 	lc->restart_psw.mask = PSW_KERNEL_BITS;
lc                395 arch/s390/kernel/setup.c 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc                396 arch/s390/kernel/setup.c 	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc                397 arch/s390/kernel/setup.c 	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
lc                398 arch/s390/kernel/setup.c 	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
lc                400 arch/s390/kernel/setup.c 	lc->svc_new_psw.addr = (unsigned long) system_call;
lc                401 arch/s390/kernel/setup.c 	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc                402 arch/s390/kernel/setup.c 	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
lc                403 arch/s390/kernel/setup.c 	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc                404 arch/s390/kernel/setup.c 	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
lc                405 arch/s390/kernel/setup.c 	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc                406 arch/s390/kernel/setup.c 	lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc                407 arch/s390/kernel/setup.c 	lc->clock_comparator = clock_comparator_max;
lc                408 arch/s390/kernel/setup.c 	lc->nodat_stack = ((unsigned long) &init_thread_union)
lc                410 arch/s390/kernel/setup.c 	lc->current_task = (unsigned long)&init_task;
lc                411 arch/s390/kernel/setup.c 	lc->lpp = LPP_MAGIC;
lc                412 arch/s390/kernel/setup.c 	lc->machine_flags = S390_lowcore.machine_flags;
lc                413 arch/s390/kernel/setup.c 	lc->preempt_count = S390_lowcore.preempt_count;
lc                414 arch/s390/kernel/setup.c 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
lc                415 arch/s390/kernel/setup.c 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
lc                416 arch/s390/kernel/setup.c 	       sizeof(lc->stfle_fac_list));
lc                417 arch/s390/kernel/setup.c 	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
lc                418 arch/s390/kernel/setup.c 	       sizeof(lc->alt_stfle_fac_list));
lc                419 arch/s390/kernel/setup.c 	nmi_alloc_boot_cpu(lc);
lc                420 arch/s390/kernel/setup.c 	vdso_alloc_boot_cpu(lc);
lc                421 arch/s390/kernel/setup.c 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc                422 arch/s390/kernel/setup.c 	lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc                423 arch/s390/kernel/setup.c 	lc->exit_timer = S390_lowcore.exit_timer;
lc                424 arch/s390/kernel/setup.c 	lc->user_timer = S390_lowcore.user_timer;
lc                425 arch/s390/kernel/setup.c 	lc->system_timer = S390_lowcore.system_timer;
lc                426 arch/s390/kernel/setup.c 	lc->steal_timer = S390_lowcore.steal_timer;
lc                427 arch/s390/kernel/setup.c 	lc->last_update_timer = S390_lowcore.last_update_timer;
lc                428 arch/s390/kernel/setup.c 	lc->last_update_clock = S390_lowcore.last_update_clock;
lc                445 arch/s390/kernel/setup.c 	lc->restart_stack = (unsigned long) restart_stack;
lc                446 arch/s390/kernel/setup.c 	lc->restart_fn = (unsigned long) do_restart;
lc                447 arch/s390/kernel/setup.c 	lc->restart_data = 0;
lc                448 arch/s390/kernel/setup.c 	lc->restart_source = -1UL;
lc                451 arch/s390/kernel/setup.c 	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
lc                452 arch/s390/kernel/setup.c 	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
lc                453 arch/s390/kernel/setup.c 	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
lc                454 arch/s390/kernel/setup.c 	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
lc                455 arch/s390/kernel/setup.c 	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
lc                457 arch/s390/kernel/setup.c 	lc->spinlock_lockval = arch_spin_lockval(0);
lc                458 arch/s390/kernel/setup.c 	lc->spinlock_index = 0;
lc                460 arch/s390/kernel/setup.c 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
lc                461 arch/s390/kernel/setup.c 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc                462 arch/s390/kernel/setup.c 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc                464 arch/s390/kernel/setup.c 	set_prefix((u32)(unsigned long) lc);
lc                465 arch/s390/kernel/setup.c 	lowcore_ptr[0] = lc;
lc                192 arch/s390/kernel/smp.c 	struct lowcore *lc;
lc                206 arch/s390/kernel/smp.c 	lc = pcpu->lowcore;
lc                207 arch/s390/kernel/smp.c 	memcpy(lc, &S390_lowcore, 512);
lc                208 arch/s390/kernel/smp.c 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc                209 arch/s390/kernel/smp.c 	lc->async_stack = async_stack + STACK_INIT_OFFSET;
lc                210 arch/s390/kernel/smp.c 	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
lc                211 arch/s390/kernel/smp.c 	lc->cpu_nr = cpu;
lc                212 arch/s390/kernel/smp.c 	lc->spinlock_lockval = arch_spin_lockval(cpu);
lc                213 arch/s390/kernel/smp.c 	lc->spinlock_index = 0;
lc                214 arch/s390/kernel/smp.c 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
lc                215 arch/s390/kernel/smp.c 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc                216 arch/s390/kernel/smp.c 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc                217 arch/s390/kernel/smp.c 	if (nmi_alloc_per_cpu(lc))
lc                219 arch/s390/kernel/smp.c 	if (vdso_alloc_per_cpu(lc))
lc                221 arch/s390/kernel/smp.c 	lowcore_ptr[cpu] = lc;
lc                222 arch/s390/kernel/smp.c 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
lc                226 arch/s390/kernel/smp.c 	nmi_free_per_cpu(lc);
lc                258 arch/s390/kernel/smp.c 	struct lowcore *lc = pcpu->lowcore;
lc                262 arch/s390/kernel/smp.c 	lc->cpu_nr = cpu;
lc                263 arch/s390/kernel/smp.c 	lc->spinlock_lockval = arch_spin_lockval(cpu);
lc                264 arch/s390/kernel/smp.c 	lc->spinlock_index = 0;
lc                265 arch/s390/kernel/smp.c 	lc->percpu_offset = __per_cpu_offset[cpu];
lc                266 arch/s390/kernel/smp.c 	lc->kernel_asce = S390_lowcore.kernel_asce;
lc                267 arch/s390/kernel/smp.c 	lc->user_asce = S390_lowcore.kernel_asce;
lc                268 arch/s390/kernel/smp.c 	lc->machine_flags = S390_lowcore.machine_flags;
lc                269 arch/s390/kernel/smp.c 	lc->user_timer = lc->system_timer =
lc                270 arch/s390/kernel/smp.c 		lc->steal_timer = lc->avg_steal_timer = 0;
lc                271 arch/s390/kernel/smp.c 	__ctl_store(lc->cregs_save_area, 0, 15);
lc                272 arch/s390/kernel/smp.c 	lc->cregs_save_area[1] = lc->kernel_asce;
lc                273 arch/s390/kernel/smp.c 	lc->cregs_save_area[7] = lc->vdso_asce;
lc                274 arch/s390/kernel/smp.c 	save_access_regs((unsigned int *) lc->access_regs_save_area);
lc                275 arch/s390/kernel/smp.c 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
lc                276 arch/s390/kernel/smp.c 	       sizeof(lc->stfle_fac_list));
lc                277 arch/s390/kernel/smp.c 	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
lc                278 arch/s390/kernel/smp.c 	       sizeof(lc->alt_stfle_fac_list));
lc                284 arch/s390/kernel/smp.c 	struct lowcore *lc = pcpu->lowcore;
lc                286 arch/s390/kernel/smp.c 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
lc                288 arch/s390/kernel/smp.c 	lc->current_task = (unsigned long) tsk;
lc                289 arch/s390/kernel/smp.c 	lc->lpp = LPP_MAGIC;
lc                290 arch/s390/kernel/smp.c 	lc->current_pid = tsk->pid;
lc                291 arch/s390/kernel/smp.c 	lc->user_timer = tsk->thread.user_timer;
lc                292 arch/s390/kernel/smp.c 	lc->guest_timer = tsk->thread.guest_timer;
lc                293 arch/s390/kernel/smp.c 	lc->system_timer = tsk->thread.system_timer;
lc                294 arch/s390/kernel/smp.c 	lc->hardirq_timer = tsk->thread.hardirq_timer;
lc                295 arch/s390/kernel/smp.c 	lc->softirq_timer = tsk->thread.softirq_timer;
lc                296 arch/s390/kernel/smp.c 	lc->steal_timer = 0;
lc                301 arch/s390/kernel/smp.c 	struct lowcore *lc = pcpu->lowcore;
lc                303 arch/s390/kernel/smp.c 	lc->restart_stack = lc->nodat_stack;
lc                304 arch/s390/kernel/smp.c 	lc->restart_fn = (unsigned long) func;
lc                305 arch/s390/kernel/smp.c 	lc->restart_data = (unsigned long) data;
lc                306 arch/s390/kernel/smp.c 	lc->restart_source = -1UL;
lc                322 arch/s390/kernel/smp.c 	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
lc                331 arch/s390/kernel/smp.c 	mem_assign_absolute(lc->restart_stack, stack);
lc                332 arch/s390/kernel/smp.c 	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
lc                333 arch/s390/kernel/smp.c 	mem_assign_absolute(lc->restart_data, (unsigned long) data);
lc                334 arch/s390/kernel/smp.c 	mem_assign_absolute(lc->restart_source, source_cpu);
lc                387 arch/s390/kernel/smp.c 	struct lowcore *lc = pcpu_devices->lowcore;
lc                390 arch/s390/kernel/smp.c 		lc = &S390_lowcore;
lc                393 arch/s390/kernel/smp.c 		      lc->nodat_stack);
lc                551 arch/s390/kvm/interrupt.c 	unsigned long lc;
lc                567 arch/s390/kvm/interrupt.c 	lc = ext_sa_addr & MCESA_LC_MASK;
lc                569 arch/s390/kvm/interrupt.c 		switch (lc) {
lc                596 arch/s390/kvm/interrupt.c 	    && (lc == 11 || lc == 12)) {
lc                195 arch/s390/mm/maccess.c 	unsigned long lc;
lc                201 arch/s390/mm/maccess.c 		lc = (unsigned long) lowcore_ptr[cpu];
lc                202 arch/s390/mm/maccess.c 		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
lc                268 arch/x86/boot/string.c 		unsigned int lc = c | 0x20; /* don't tolower() this line */
lc                273 arch/x86/boot/string.c 		else if ('a' <= lc && lc <= 'f')
lc                274 arch/x86/boot/string.c 			val = lc - 'a' + 10;
lc               1505 drivers/gpu/drm/bridge/tc358767.c 		bool lc = val & INT_GPIO_LC(tc->hpd_pin);
lc               1508 drivers/gpu/drm/bridge/tc358767.c 			h ? "H" : "", lc ? "LC" : "");
lc               1510 drivers/gpu/drm/bridge/tc358767.c 		if (h || lc)
lc                 31 drivers/md/dm-linear.c 	struct linear_c *lc;
lc                 41 drivers/md/dm-linear.c 	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
lc                 42 drivers/md/dm-linear.c 	if (lc == NULL) {
lc                 52 drivers/md/dm-linear.c 	lc->start = tmp;
lc                 54 drivers/md/dm-linear.c 	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
lc                 65 drivers/md/dm-linear.c 	ti->private = lc;
lc                 69 drivers/md/dm-linear.c 	kfree(lc);
lc                 75 drivers/md/dm-linear.c 	struct linear_c *lc = (struct linear_c *) ti->private;
lc                 77 drivers/md/dm-linear.c 	dm_put_device(ti, lc->dev);
lc                 78 drivers/md/dm-linear.c 	kfree(lc);
lc                 83 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                 85 drivers/md/dm-linear.c 	return lc->start + dm_target_offset(ti, bi_sector);
lc                 90 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                 92 drivers/md/dm-linear.c 	bio_set_dev(bio, lc->dev->bdev);
lc                108 drivers/md/dm-linear.c 	struct linear_c *lc = (struct linear_c *) ti->private;
lc                116 drivers/md/dm-linear.c 		snprintf(result, maxlen, "%s %llu", lc->dev->name,
lc                117 drivers/md/dm-linear.c 				(unsigned long long)lc->start);
lc                124 drivers/md/dm-linear.c 	struct linear_c *lc = (struct linear_c *) ti->private;
lc                125 drivers/md/dm-linear.c 	struct dm_dev *dev = lc->dev;
lc                132 drivers/md/dm-linear.c 	if (lc->start ||
lc                142 drivers/md/dm-linear.c 	struct linear_c *lc = (struct linear_c *) ti->private;
lc                146 drivers/md/dm-linear.c 	ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
lc                152 drivers/md/dm-linear.c 		dm_remap_zone_report(ti, lc->start, zones, nr_zones);
lc                160 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                162 drivers/md/dm-linear.c 	return fn(ti, lc->dev, lc->start, ti->len, data);
lc                170 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                171 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
lc                172 drivers/md/dm-linear.c 	struct dax_device *dax_dev = lc->dev->dax_dev;
lc                185 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                186 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
lc                187 drivers/md/dm-linear.c 	struct dax_device *dax_dev = lc->dev->dax_dev;
lc                199 drivers/md/dm-linear.c 	struct linear_c *lc = ti->private;
lc                200 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
lc                201 drivers/md/dm-linear.c 	struct dax_device *dax_dev = lc->dev->dax_dev;
lc                 84 drivers/md/dm-log-userspace-base.c static int userspace_do_request(struct log_c *lc, const char *uuid,
lc                 96 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
lc                107 drivers/md/dm-log-userspace-base.c 		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
lc                108 drivers/md/dm-log-userspace-base.c 					 lc->usr_argv_str,
lc                109 drivers/md/dm-log-userspace-base.c 					 strlen(lc->usr_argv_str) + 1,
lc                115 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
lc                159 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);
lc                161 drivers/md/dm-log-userspace-base.c 	atomic_set(&lc->sched_flush, 0);
lc                163 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);
lc                166 drivers/md/dm-log-userspace-base.c 		dm_table_event(lc->ti->table);
lc                196 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = NULL;
lc                207 drivers/md/dm-log-userspace-base.c 	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
lc                208 drivers/md/dm-log-userspace-base.c 	if (!lc) {
lc                214 drivers/md/dm-log-userspace-base.c 	lc->luid = (unsigned long)lc;
lc                216 drivers/md/dm-log-userspace-base.c 	lc->ti = ti;
lc                220 drivers/md/dm-log-userspace-base.c 		kfree(lc);
lc                224 drivers/md/dm-log-userspace-base.c 	lc->usr_argc = argc;
lc                226 drivers/md/dm-log-userspace-base.c 	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
lc                229 drivers/md/dm-log-userspace-base.c 	spin_lock_init(&lc->flush_lock);
lc                230 drivers/md/dm-log-userspace-base.c 	INIT_LIST_HEAD(&lc->mark_list);
lc                231 drivers/md/dm-log-userspace-base.c 	INIT_LIST_HEAD(&lc->clear_list);
lc                234 drivers/md/dm-log-userspace-base.c 		lc->integrated_flush = 1;
lc                241 drivers/md/dm-log-userspace-base.c 		kfree(lc);
lc                252 drivers/md/dm-log-userspace-base.c 	r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
lc                262 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
lc                276 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
lc                284 drivers/md/dm-log-userspace-base.c 	lc->region_size = (uint32_t)rdata;
lc                285 drivers/md/dm-log-userspace-base.c 	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
lc                294 drivers/md/dm-log-userspace-base.c 				  dm_table_get_mode(ti->table), &lc->log_dev);
lc                300 drivers/md/dm-log-userspace-base.c 	if (lc->integrated_flush) {
lc                301 drivers/md/dm-log-userspace-base.c 		lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
lc                302 drivers/md/dm-log-userspace-base.c 		if (!lc->dmlog_wq) {
lc                308 drivers/md/dm-log-userspace-base.c 		INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
lc                309 drivers/md/dm-log-userspace-base.c 		atomic_set(&lc->sched_flush, 0);
lc                315 drivers/md/dm-log-userspace-base.c 		mempool_exit(&lc->flush_entry_pool);
lc                316 drivers/md/dm-log-userspace-base.c 		kfree(lc);
lc                319 drivers/md/dm-log-userspace-base.c 		lc->usr_argv_str = ctr_str;
lc                320 drivers/md/dm-log-userspace-base.c 		log->context = lc;
lc                328 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                330 drivers/md/dm-log-userspace-base.c 	if (lc->integrated_flush) {
lc                332 drivers/md/dm-log-userspace-base.c 		if (atomic_read(&lc->sched_flush))
lc                333 drivers/md/dm-log-userspace-base.c 			flush_delayed_work(&lc->flush_log_work);
lc                335 drivers/md/dm-log-userspace-base.c 		destroy_workqueue(lc->dmlog_wq);
lc                338 drivers/md/dm-log-userspace-base.c 	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
lc                341 drivers/md/dm-log-userspace-base.c 	if (lc->log_dev)
lc                342 drivers/md/dm-log-userspace-base.c 		dm_put_device(lc->ti, lc->log_dev);
lc                344 drivers/md/dm-log-userspace-base.c 	mempool_exit(&lc->flush_entry_pool);
lc                346 drivers/md/dm-log-userspace-base.c 	kfree(lc->usr_argv_str);
lc                347 drivers/md/dm-log-userspace-base.c 	kfree(lc);
lc                355 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                357 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
lc                366 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                371 drivers/md/dm-log-userspace-base.c 	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
lc                372 drivers/md/dm-log-userspace-base.c 		flush_delayed_work(&lc->flush_log_work);
lc                374 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
lc                383 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                385 drivers/md/dm-log-userspace-base.c 	lc->in_sync_hint = 0;
lc                386 drivers/md/dm-log-userspace-base.c 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
lc                394 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                396 drivers/md/dm-log-userspace-base.c 	return lc->region_size;
lc                413 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                416 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
lc                441 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                460 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
lc                466 drivers/md/dm-log-userspace-base.c static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
lc                472 drivers/md/dm-log-userspace-base.c 		r = userspace_do_request(lc, lc->uuid, fe->type,
lc                483 drivers/md/dm-log-userspace-base.c static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
lc                511 drivers/md/dm-log-userspace-base.c 			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
lc                521 drivers/md/dm-log-userspace-base.c 			r = userspace_do_request(lc, lc->uuid, type,
lc                530 drivers/md/dm-log-userspace-base.c 				r = flush_one_by_one(lc, flush_list);
lc                566 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                572 drivers/md/dm-log-userspace-base.c 	mempool_t *flush_entry_pool = &lc->flush_entry_pool;
lc                574 drivers/md/dm-log-userspace-base.c 	spin_lock_irqsave(&lc->flush_lock, flags);
lc                575 drivers/md/dm-log-userspace-base.c 	list_splice_init(&lc->mark_list, &mark_list);
lc                576 drivers/md/dm-log-userspace-base.c 	list_splice_init(&lc->clear_list, &clear_list);
lc                577 drivers/md/dm-log-userspace-base.c 	spin_unlock_irqrestore(&lc->flush_lock, flags);
lc                585 drivers/md/dm-log-userspace-base.c 	r = flush_by_group(lc, &clear_list, 0);
lc                589 drivers/md/dm-log-userspace-base.c 	if (!lc->integrated_flush) {
lc                590 drivers/md/dm-log-userspace-base.c 		r = flush_by_group(lc, &mark_list, 0);
lc                593 drivers/md/dm-log-userspace-base.c 		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
lc                601 drivers/md/dm-log-userspace-base.c 	r = flush_by_group(lc, &mark_list, 1);
lc                605 drivers/md/dm-log-userspace-base.c 	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
lc                610 drivers/md/dm-log-userspace-base.c 		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
lc                611 drivers/md/dm-log-userspace-base.c 		atomic_set(&lc->sched_flush, 1);
lc                617 drivers/md/dm-log-userspace-base.c 		cancel_delayed_work(&lc->flush_log_work);
lc                618 drivers/md/dm-log-userspace-base.c 		atomic_set(&lc->sched_flush, 0);
lc                637 drivers/md/dm-log-userspace-base.c 		dm_table_event(lc->ti->table);
lc                651 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                655 drivers/md/dm-log-userspace-base.c 	fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
lc                658 drivers/md/dm-log-userspace-base.c 	spin_lock_irqsave(&lc->flush_lock, flags);
lc                661 drivers/md/dm-log-userspace-base.c 	list_add(&fe->list, &lc->mark_list);
lc                662 drivers/md/dm-log-userspace-base.c 	spin_unlock_irqrestore(&lc->flush_lock, flags);
lc                680 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                689 drivers/md/dm-log-userspace-base.c 	fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
lc                695 drivers/md/dm-log-userspace-base.c 	spin_lock_irqsave(&lc->flush_lock, flags);
lc                698 drivers/md/dm-log-userspace-base.c 	list_add(&fe->list, &lc->clear_list);
lc                699 drivers/md/dm-log-userspace-base.c 	spin_unlock_irqrestore(&lc->flush_lock, flags);
lc                716 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                722 drivers/md/dm-log-userspace-base.c 	if (lc->in_sync_hint >= lc->region_count)
lc                726 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
lc                742 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                751 drivers/md/dm-log-userspace-base.c 	(void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
lc                774 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                777 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
lc                783 drivers/md/dm-log-userspace-base.c 	if (sync_count >= lc->region_count)
lc                784 drivers/md/dm-log-userspace-base.c 		lc->in_sync_hint = lc->region_count;
lc                800 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                804 drivers/md/dm-log-userspace-base.c 		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
lc                814 drivers/md/dm-log-userspace-base.c 		table_args = strchr(lc->usr_argv_str, ' ');
lc                818 drivers/md/dm-log-userspace-base.c 		DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
lc                819 drivers/md/dm-log-userspace-base.c 		if (lc->integrated_flush)
lc                837 drivers/md/dm-log-userspace-base.c 	struct log_c *lc = log->context;
lc                852 drivers/md/dm-log-userspace-base.c 	if (region < lc->in_sync_hint)
lc                858 drivers/md/dm-log-userspace-base.c 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
lc                864 drivers/md/dm-log-userspace-base.c 	lc->in_sync_hint = pkg.in_sync_hint;
lc                137 drivers/md/dm-log-writes.c static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc,
lc                140 drivers/md/dm-log-writes.c 	return sectors >> (lc->sectorshift - SECTOR_SHIFT);
lc                143 drivers/md/dm-log-writes.c static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc,
lc                146 drivers/md/dm-log-writes.c 	return sectors << (lc->sectorshift - SECTOR_SHIFT);
lc                149 drivers/md/dm-log-writes.c static void put_pending_block(struct log_writes_c *lc)
lc                151 drivers/md/dm-log-writes.c 	if (atomic_dec_and_test(&lc->pending_blocks)) {
lc                153 drivers/md/dm-log-writes.c 		if (waitqueue_active(&lc->wait))
lc                154 drivers/md/dm-log-writes.c 			wake_up(&lc->wait);
lc                158 drivers/md/dm-log-writes.c static void put_io_block(struct log_writes_c *lc)
lc                160 drivers/md/dm-log-writes.c 	if (atomic_dec_and_test(&lc->io_blocks)) {
lc                162 drivers/md/dm-log-writes.c 		if (waitqueue_active(&lc->wait))
lc                163 drivers/md/dm-log-writes.c 			wake_up(&lc->wait);
lc                169 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = bio->bi_private;
lc                175 drivers/md/dm-log-writes.c 		spin_lock_irqsave(&lc->blocks_lock, flags);
lc                176 drivers/md/dm-log-writes.c 		lc->logging_enabled = false;
lc                177 drivers/md/dm-log-writes.c 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
lc                181 drivers/md/dm-log-writes.c 	put_io_block(lc);
lc                187 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = bio->bi_private;
lc                189 drivers/md/dm-log-writes.c 	complete(&lc->super_done);
lc                197 drivers/md/dm-log-writes.c static void free_pending_block(struct log_writes_c *lc,
lc                208 drivers/md/dm-log-writes.c 	put_pending_block(lc);
lc                211 drivers/md/dm-log-writes.c static int write_metadata(struct log_writes_c *lc, void *entry,
lc                227 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
lc                230 drivers/md/dm-log-writes.c 	bio->bi_private = lc;
lc                245 drivers/md/dm-log-writes.c 	       lc->sectorsize - entrylen - datalen);
lc                248 drivers/md/dm-log-writes.c 	ret = bio_add_page(bio, page, lc->sectorsize, 0);
lc                249 drivers/md/dm-log-writes.c 	if (ret != lc->sectorsize) {
lc                259 drivers/md/dm-log-writes.c 	put_io_block(lc);
lc                263 drivers/md/dm-log-writes.c static int write_inline_data(struct log_writes_c *lc, void *entry,
lc                277 drivers/md/dm-log-writes.c 		atomic_inc(&lc->io_blocks);
lc                287 drivers/md/dm-log-writes.c 		bio_set_dev(bio, lc->logdev->bdev);
lc                289 drivers/md/dm-log-writes.c 		bio->bi_private = lc;
lc                294 drivers/md/dm-log-writes.c 			pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
lc                327 drivers/md/dm-log-writes.c 	put_io_block(lc);
lc                331 drivers/md/dm-log-writes.c static int log_one_block(struct log_writes_c *lc,
lc                345 drivers/md/dm-log-writes.c 	if (write_metadata(lc, &entry, sizeof(entry), block->data,
lc                347 drivers/md/dm-log-writes.c 		free_pending_block(lc, block);
lc                351 drivers/md/dm-log-writes.c 	sector += dev_to_bio_sectors(lc, 1);
lc                354 drivers/md/dm-log-writes.c 		if (write_inline_data(lc, &entry, sizeof(entry), block->data,
lc                356 drivers/md/dm-log-writes.c 			free_pending_block(lc, block);
lc                366 drivers/md/dm-log-writes.c 	atomic_inc(&lc->io_blocks);
lc                374 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
lc                376 drivers/md/dm-log-writes.c 	bio->bi_private = lc;
lc                387 drivers/md/dm-log-writes.c 			atomic_inc(&lc->io_blocks);
lc                396 drivers/md/dm-log-writes.c 			bio_set_dev(bio, lc->logdev->bdev);
lc                398 drivers/md/dm-log-writes.c 			bio->bi_private = lc;
lc                415 drivers/md/dm-log-writes.c 	put_pending_block(lc);
lc                418 drivers/md/dm-log-writes.c 	free_pending_block(lc, block);
lc                419 drivers/md/dm-log-writes.c 	put_io_block(lc);
lc                423 drivers/md/dm-log-writes.c static int log_super(struct log_writes_c *lc)
lc                429 drivers/md/dm-log-writes.c 	super.nr_entries = cpu_to_le64(lc->logged_entries);
lc                430 drivers/md/dm-log-writes.c 	super.sectorsize = cpu_to_le32(lc->sectorsize);
lc                432 drivers/md/dm-log-writes.c 	if (write_metadata(lc, &super, sizeof(super), NULL, 0,
lc                442 drivers/md/dm-log-writes.c 	wait_for_completion_io(&lc->super_done);
lc                447 drivers/md/dm-log-writes.c static inline sector_t logdev_last_sector(struct log_writes_c *lc)
lc                449 drivers/md/dm-log-writes.c 	return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
lc                454 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = (struct log_writes_c *)arg;
lc                463 drivers/md/dm-log-writes.c 		spin_lock_irq(&lc->blocks_lock);
lc                464 drivers/md/dm-log-writes.c 		if (!list_empty(&lc->logging_blocks)) {
lc                465 drivers/md/dm-log-writes.c 			block = list_first_entry(&lc->logging_blocks,
lc                468 drivers/md/dm-log-writes.c 			if (!lc->logging_enabled)
lc                471 drivers/md/dm-log-writes.c 			sector = lc->next_sector;
lc                473 drivers/md/dm-log-writes.c 				lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
lc                474 drivers/md/dm-log-writes.c 			lc->next_sector += dev_to_bio_sectors(lc, 1);
lc                480 drivers/md/dm-log-writes.c 			if (!lc->end_sector)
lc                481 drivers/md/dm-log-writes.c 				lc->end_sector = logdev_last_sector(lc);
lc                482 drivers/md/dm-log-writes.c 			if (lc->end_sector &&
lc                483 drivers/md/dm-log-writes.c 			    lc->next_sector >= lc->end_sector) {
lc                485 drivers/md/dm-log-writes.c 				lc->logging_enabled = false;
lc                488 drivers/md/dm-log-writes.c 			lc->logged_entries++;
lc                489 drivers/md/dm-log-writes.c 			atomic_inc(&lc->io_blocks);
lc                493 drivers/md/dm-log-writes.c 				atomic_inc(&lc->io_blocks);
lc                496 drivers/md/dm-log-writes.c 		logging_enabled = lc->logging_enabled;
lc                497 drivers/md/dm-log-writes.c 		spin_unlock_irq(&lc->blocks_lock);
lc                500 drivers/md/dm-log-writes.c 				ret = log_one_block(lc, block, sector);
lc                502 drivers/md/dm-log-writes.c 					ret = log_super(lc);
lc                504 drivers/md/dm-log-writes.c 					spin_lock_irq(&lc->blocks_lock);
lc                505 drivers/md/dm-log-writes.c 					lc->logging_enabled = false;
lc                506 drivers/md/dm-log-writes.c 					spin_unlock_irq(&lc->blocks_lock);
lc                509 drivers/md/dm-log-writes.c 				free_pending_block(lc, block);
lc                516 drivers/md/dm-log-writes.c 			    list_empty(&lc->logging_blocks))
lc                530 drivers/md/dm-log-writes.c 	struct log_writes_c *lc;
lc                543 drivers/md/dm-log-writes.c 	lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
lc                544 drivers/md/dm-log-writes.c 	if (!lc) {
lc                548 drivers/md/dm-log-writes.c 	spin_lock_init(&lc->blocks_lock);
lc                549 drivers/md/dm-log-writes.c 	INIT_LIST_HEAD(&lc->unflushed_blocks);
lc                550 drivers/md/dm-log-writes.c 	INIT_LIST_HEAD(&lc->logging_blocks);
lc                551 drivers/md/dm-log-writes.c 	init_waitqueue_head(&lc->wait);
lc                552 drivers/md/dm-log-writes.c 	init_completion(&lc->super_done);
lc                553 drivers/md/dm-log-writes.c 	atomic_set(&lc->io_blocks, 0);
lc                554 drivers/md/dm-log-writes.c 	atomic_set(&lc->pending_blocks, 0);
lc                557 drivers/md/dm-log-writes.c 	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
lc                565 drivers/md/dm-log-writes.c 			    &lc->logdev);
lc                568 drivers/md/dm-log-writes.c 		dm_put_device(ti, lc->dev);
lc                572 drivers/md/dm-log-writes.c 	lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
lc                573 drivers/md/dm-log-writes.c 	lc->sectorshift = ilog2(lc->sectorsize);
lc                574 drivers/md/dm-log-writes.c 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
lc                575 drivers/md/dm-log-writes.c 	if (IS_ERR(lc->log_kthread)) {
lc                576 drivers/md/dm-log-writes.c 		ret = PTR_ERR(lc->log_kthread);
lc                578 drivers/md/dm-log-writes.c 		dm_put_device(ti, lc->dev);
lc                579 drivers/md/dm-log-writes.c 		dm_put_device(ti, lc->logdev);
lc                588 drivers/md/dm-log-writes.c 	lc->next_sector = lc->sectorsize >> SECTOR_SHIFT;
lc                589 drivers/md/dm-log-writes.c 	lc->logging_enabled = true;
lc                590 drivers/md/dm-log-writes.c 	lc->end_sector = logdev_last_sector(lc);
lc                591 drivers/md/dm-log-writes.c 	lc->device_supports_discard = true;
lc                598 drivers/md/dm-log-writes.c 	ti->private = lc;
lc                602 drivers/md/dm-log-writes.c 	kfree(lc);
lc                606 drivers/md/dm-log-writes.c static int log_mark(struct log_writes_c *lc, char *data)
lc                609 drivers/md/dm-log-writes.c 	size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);
lc                623 drivers/md/dm-log-writes.c 	atomic_inc(&lc->pending_blocks);
lc                626 drivers/md/dm-log-writes.c 	spin_lock_irq(&lc->blocks_lock);
lc                627 drivers/md/dm-log-writes.c 	list_add_tail(&block->list, &lc->logging_blocks);
lc                628 drivers/md/dm-log-writes.c 	spin_unlock_irq(&lc->blocks_lock);
lc                629 drivers/md/dm-log-writes.c 	wake_up_process(lc->log_kthread);
lc                635 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                637 drivers/md/dm-log-writes.c 	spin_lock_irq(&lc->blocks_lock);
lc                638 drivers/md/dm-log-writes.c 	list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
lc                639 drivers/md/dm-log-writes.c 	spin_unlock_irq(&lc->blocks_lock);
lc                645 drivers/md/dm-log-writes.c 	log_mark(lc, "dm-log-writes-end");
lc                646 drivers/md/dm-log-writes.c 	wake_up_process(lc->log_kthread);
lc                647 drivers/md/dm-log-writes.c 	wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
lc                648 drivers/md/dm-log-writes.c 		   !atomic_read(&lc->pending_blocks));
lc                649 drivers/md/dm-log-writes.c 	kthread_stop(lc->log_kthread);
lc                651 drivers/md/dm-log-writes.c 	WARN_ON(!list_empty(&lc->logging_blocks));
lc                652 drivers/md/dm-log-writes.c 	WARN_ON(!list_empty(&lc->unflushed_blocks));
lc                653 drivers/md/dm-log-writes.c 	dm_put_device(ti, lc->dev);
lc                654 drivers/md/dm-log-writes.c 	dm_put_device(ti, lc->logdev);
lc                655 drivers/md/dm-log-writes.c 	kfree(lc);
lc                660 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                662 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->dev->bdev);
lc                667 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                682 drivers/md/dm-log-writes.c 	if (!lc->logging_enabled)
lc                707 drivers/md/dm-log-writes.c 		spin_lock_irq(&lc->blocks_lock);
lc                708 drivers/md/dm-log-writes.c 		lc->logging_enabled = false;
lc                709 drivers/md/dm-log-writes.c 		spin_unlock_irq(&lc->blocks_lock);
lc                714 drivers/md/dm-log-writes.c 	atomic_inc(&lc->pending_blocks);
lc                725 drivers/md/dm-log-writes.c 	block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector);
lc                726 drivers/md/dm-log-writes.c 	block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
lc                731 drivers/md/dm-log-writes.c 		if (lc->device_supports_discard)
lc                739 drivers/md/dm-log-writes.c 		spin_lock_irq(&lc->blocks_lock);
lc                740 drivers/md/dm-log-writes.c 		list_splice_init(&lc->unflushed_blocks, &block->list);
lc                741 drivers/md/dm-log-writes.c 		spin_unlock_irq(&lc->blocks_lock);
lc                761 drivers/md/dm-log-writes.c 			free_pending_block(lc, block);
lc                762 drivers/md/dm-log-writes.c 			spin_lock_irq(&lc->blocks_lock);
lc                763 drivers/md/dm-log-writes.c 			lc->logging_enabled = false;
lc                764 drivers/md/dm-log-writes.c 			spin_unlock_irq(&lc->blocks_lock);
lc                781 drivers/md/dm-log-writes.c 		spin_lock_irq(&lc->blocks_lock);
lc                782 drivers/md/dm-log-writes.c 		list_splice_init(&lc->unflushed_blocks, &block->list);
lc                783 drivers/md/dm-log-writes.c 		spin_unlock_irq(&lc->blocks_lock);
lc                793 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                800 drivers/md/dm-log-writes.c 		spin_lock_irqsave(&lc->blocks_lock, flags);
lc                802 drivers/md/dm-log-writes.c 			list_splice_tail_init(&block->list, &lc->logging_blocks);
lc                803 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->logging_blocks);
lc                804 drivers/md/dm-log-writes.c 			wake_up_process(lc->log_kthread);
lc                806 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->logging_blocks);
lc                807 drivers/md/dm-log-writes.c 			wake_up_process(lc->log_kthread);
lc                809 drivers/md/dm-log-writes.c 			list_add_tail(&block->list, &lc->unflushed_blocks);
lc                810 drivers/md/dm-log-writes.c 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
lc                824 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                828 drivers/md/dm-log-writes.c 		DMEMIT("%llu %llu", lc->logged_entries,
lc                829 drivers/md/dm-log-writes.c 		       (unsigned long long)lc->next_sector - 1);
lc                830 drivers/md/dm-log-writes.c 		if (!lc->logging_enabled)
lc                835 drivers/md/dm-log-writes.c 		DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
lc                843 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                844 drivers/md/dm-log-writes.c 	struct dm_dev *dev = lc->dev;
lc                859 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                861 drivers/md/dm-log-writes.c 	return fn(ti, lc->dev, 0, ti->len, data);
lc                872 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                880 drivers/md/dm-log-writes.c 		r = log_mark(lc, argv[1]);
lc                889 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                890 drivers/md/dm-log-writes.c 	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
lc                893 drivers/md/dm-log-writes.c 		lc->device_supports_discard = false;
lc                894 drivers/md/dm-log-writes.c 		limits->discard_granularity = lc->sectorsize;
lc                897 drivers/md/dm-log-writes.c 	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
lc                898 drivers/md/dm-log-writes.c 	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
lc                903 drivers/md/dm-log-writes.c static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
lc                936 drivers/md/dm-log-writes.c 	block->sector = bio_to_dev_sectors(lc, sector);
lc                937 drivers/md/dm-log-writes.c 	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
lc                939 drivers/md/dm-log-writes.c 	atomic_inc(&lc->pending_blocks);
lc                940 drivers/md/dm-log-writes.c 	spin_lock_irq(&lc->blocks_lock);
lc                941 drivers/md/dm-log-writes.c 	list_add_tail(&block->list, &lc->unflushed_blocks);
lc                942 drivers/md/dm-log-writes.c 	spin_unlock_irq(&lc->blocks_lock);
lc                943 drivers/md/dm-log-writes.c 	wake_up_process(lc->log_kthread);
lc                951 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                955 drivers/md/dm-log-writes.c 	ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
lc                958 drivers/md/dm-log-writes.c 	return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
lc                965 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                969 drivers/md/dm-log-writes.c 	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
lc                973 drivers/md/dm-log-writes.c 	if (!lc->logging_enabled)
lc                976 drivers/md/dm-log-writes.c 	err = log_dax(lc, sector, bytes, i);
lc                982 drivers/md/dm-log-writes.c 	return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
lc                989 drivers/md/dm-log-writes.c 	struct log_writes_c *lc = ti->private;
lc                992 drivers/md/dm-log-writes.c 	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
lc                994 drivers/md/dm-log-writes.c 	return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
lc                294 drivers/md/dm-log.c static int rw_header(struct log_c *lc, int op)
lc                296 drivers/md/dm-log.c 	lc->io_req.bi_op = op;
lc                297 drivers/md/dm-log.c 	lc->io_req.bi_op_flags = 0;
lc                299 drivers/md/dm-log.c 	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
lc                302 drivers/md/dm-log.c static int flush_header(struct log_c *lc)
lc                305 drivers/md/dm-log.c 		.bdev = lc->header_location.bdev,
lc                310 drivers/md/dm-log.c 	lc->io_req.bi_op = REQ_OP_WRITE;
lc                311 drivers/md/dm-log.c 	lc->io_req.bi_op_flags = REQ_PREFLUSH;
lc                313 drivers/md/dm-log.c 	return dm_io(&lc->io_req, 1, &null_location, NULL);
lc                369 drivers/md/dm-log.c 	struct log_c *lc;
lc                401 drivers/md/dm-log.c 	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
lc                402 drivers/md/dm-log.c 	if (!lc) {
lc                407 drivers/md/dm-log.c 	lc->ti = ti;
lc                408 drivers/md/dm-log.c 	lc->touched_dirtied = 0;
lc                409 drivers/md/dm-log.c 	lc->touched_cleaned = 0;
lc                410 drivers/md/dm-log.c 	lc->flush_failed = 0;
lc                411 drivers/md/dm-log.c 	lc->region_size = region_size;
lc                412 drivers/md/dm-log.c 	lc->region_count = region_count;
lc                413 drivers/md/dm-log.c 	lc->sync = sync;
lc                419 drivers/md/dm-log.c 				  sizeof(*lc->clean_bits) << BYTE_SHIFT);
lc                422 drivers/md/dm-log.c 	lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
lc                428 drivers/md/dm-log.c 		lc->clean_bits = vmalloc(bitset_size);
lc                429 drivers/md/dm-log.c 		if (!lc->clean_bits) {
lc                431 drivers/md/dm-log.c 			kfree(lc);
lc                434 drivers/md/dm-log.c 		lc->disk_header = NULL;
lc                436 drivers/md/dm-log.c 		lc->log_dev = dev;
lc                437 drivers/md/dm-log.c 		lc->log_dev_failed = 0;
lc                438 drivers/md/dm-log.c 		lc->log_dev_flush_failed = 0;
lc                439 drivers/md/dm-log.c 		lc->header_location.bdev = lc->log_dev->bdev;
lc                440 drivers/md/dm-log.c 		lc->header_location.sector = 0;
lc                447 drivers/md/dm-log.c 				bdev_logical_block_size(lc->header_location.
lc                453 drivers/md/dm-log.c 			kfree(lc);
lc                457 drivers/md/dm-log.c 		lc->header_location.count = buf_size >> SECTOR_SHIFT;
lc                459 drivers/md/dm-log.c 		lc->io_req.mem.type = DM_IO_VMA;
lc                460 drivers/md/dm-log.c 		lc->io_req.notify.fn = NULL;
lc                461 drivers/md/dm-log.c 		lc->io_req.client = dm_io_client_create();
lc                462 drivers/md/dm-log.c 		if (IS_ERR(lc->io_req.client)) {
lc                463 drivers/md/dm-log.c 			r = PTR_ERR(lc->io_req.client);
lc                465 drivers/md/dm-log.c 			kfree(lc);
lc                469 drivers/md/dm-log.c 		lc->disk_header = vmalloc(buf_size);
lc                470 drivers/md/dm-log.c 		if (!lc->disk_header) {
lc                472 drivers/md/dm-log.c 			dm_io_client_destroy(lc->io_req.client);
lc                473 drivers/md/dm-log.c 			kfree(lc);
lc                477 drivers/md/dm-log.c 		lc->io_req.mem.ptr.vma = lc->disk_header;
lc                478 drivers/md/dm-log.c 		lc->clean_bits = (void *)lc->disk_header +
lc                482 drivers/md/dm-log.c 	memset(lc->clean_bits, -1, bitset_size);
lc                484 drivers/md/dm-log.c 	lc->sync_bits = vmalloc(bitset_size);
lc                485 drivers/md/dm-log.c 	if (!lc->sync_bits) {
lc                488 drivers/md/dm-log.c 			vfree(lc->clean_bits);
lc                490 drivers/md/dm-log.c 			dm_io_client_destroy(lc->io_req.client);
lc                491 drivers/md/dm-log.c 		vfree(lc->disk_header);
lc                492 drivers/md/dm-log.c 		kfree(lc);
lc                495 drivers/md/dm-log.c 	memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
lc                496 drivers/md/dm-log.c 	lc->sync_count = (sync == NOSYNC) ? region_count : 0;
lc                498 drivers/md/dm-log.c 	lc->recovering_bits = vzalloc(bitset_size);
lc                499 drivers/md/dm-log.c 	if (!lc->recovering_bits) {
lc                501 drivers/md/dm-log.c 		vfree(lc->sync_bits);
lc                503 drivers/md/dm-log.c 			vfree(lc->clean_bits);
lc                505 drivers/md/dm-log.c 			dm_io_client_destroy(lc->io_req.client);
lc                506 drivers/md/dm-log.c 		vfree(lc->disk_header);
lc                507 drivers/md/dm-log.c 		kfree(lc);
lc                510 drivers/md/dm-log.c 	lc->sync_search = 0;
lc                511 drivers/md/dm-log.c 	log->context = lc;
lc                522 drivers/md/dm-log.c static void destroy_log_context(struct log_c *lc)
lc                524 drivers/md/dm-log.c 	vfree(lc->sync_bits);
lc                525 drivers/md/dm-log.c 	vfree(lc->recovering_bits);
lc                526 drivers/md/dm-log.c 	kfree(lc);
lc                531 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                533 drivers/md/dm-log.c 	vfree(lc->clean_bits);
lc                534 drivers/md/dm-log.c 	destroy_log_context(lc);
lc                568 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                570 drivers/md/dm-log.c 	dm_put_device(lc->ti, lc->log_dev);
lc                571 drivers/md/dm-log.c 	vfree(lc->disk_header);
lc                572 drivers/md/dm-log.c 	dm_io_client_destroy(lc->io_req.client);
lc                573 drivers/md/dm-log.c 	destroy_log_context(lc);
lc                576 drivers/md/dm-log.c static void fail_log_device(struct log_c *lc)
lc                578 drivers/md/dm-log.c 	if (lc->log_dev_failed)
lc                581 drivers/md/dm-log.c 	lc->log_dev_failed = 1;
lc                582 drivers/md/dm-log.c 	dm_table_event(lc->ti->table);
lc                589 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                590 drivers/md/dm-log.c 	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
lc                593 drivers/md/dm-log.c 	r = read_header(lc);
lc                596 drivers/md/dm-log.c 		       lc->log_dev->name);
lc                597 drivers/md/dm-log.c 		fail_log_device(lc);
lc                605 drivers/md/dm-log.c 		lc->header.nr_regions = 0;
lc                609 drivers/md/dm-log.c 	if (lc->sync == NOSYNC)
lc                610 drivers/md/dm-log.c 		for (i = lc->header.nr_regions; i < lc->region_count; i++)
lc                612 drivers/md/dm-log.c 			log_set_bit(lc, lc->clean_bits, i);
lc                614 drivers/md/dm-log.c 		for (i = lc->header.nr_regions; i < lc->region_count; i++)
lc                616 drivers/md/dm-log.c 			log_clear_bit(lc, lc->clean_bits, i);
lc                619 drivers/md/dm-log.c 	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
lc                620 drivers/md/dm-log.c 		log_clear_bit(lc, lc->clean_bits, i);
lc                623 drivers/md/dm-log.c 	memcpy(lc->sync_bits, lc->clean_bits, size);
lc                624 drivers/md/dm-log.c 	lc->sync_count = memweight(lc->clean_bits,
lc                625 drivers/md/dm-log.c 				lc->bitset_uint32_count * sizeof(uint32_t));
lc                626 drivers/md/dm-log.c 	lc->sync_search = 0;
lc                629 drivers/md/dm-log.c 	lc->header.nr_regions = lc->region_count;
lc                631 drivers/md/dm-log.c 	header_to_disk(&lc->header, lc->disk_header);
lc                634 drivers/md/dm-log.c 	r = rw_header(lc, REQ_OP_WRITE);
lc                636 drivers/md/dm-log.c 		r = flush_header(lc);
lc                638 drivers/md/dm-log.c 			lc->log_dev_flush_failed = 1;
lc                642 drivers/md/dm-log.c 		       lc->log_dev->name);
lc                643 drivers/md/dm-log.c 		fail_log_device(lc);
lc                651 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                652 drivers/md/dm-log.c 	return lc->region_size;
lc                657 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                658 drivers/md/dm-log.c 	lc->sync_search = 0;
lc                664 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                665 drivers/md/dm-log.c 	return log_test_bit(lc->clean_bits, region);
lc                670 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                671 drivers/md/dm-log.c 	return log_test_bit(lc->sync_bits, region);
lc                683 drivers/md/dm-log.c 	struct log_c *lc = log->context;
lc                686 drivers/md/dm-log.c 	if (!lc->touched_cleaned && !lc->touched_dirtied)
lc                689 drivers/md/dm-log.c 	if (lc->touched_cleaned && log->flush_callback_fn &&
lc                690 drivers/md/dm-log.c 	    log->flush_callback_fn(lc->ti)) {
lc                697 drivers/md/dm-log.c 		lc->flush_failed = 1;
lc                698 drivers/md/dm-log.c 		for (i = 0; i < lc->region_count; i++)
lc                699 drivers/md/dm-log.c 			log_clear_bit(lc, lc->clean_bits, i);
lc                702 drivers/md/dm-log.c 	r = rw_header(lc, REQ_OP_WRITE);
lc                704 drivers/md/dm-log.c 		fail_log_device(lc);
lc                706 drivers/md/dm-log.c 		if (lc->touched_dirtied) {
lc                707 drivers/md/dm-log.c 			r = flush_header(lc);
lc                709 drivers/md/dm-log.c 				lc->log_dev_flush_failed = 1;
lc                710 drivers/md/dm-log.c 				fail_log_device(lc);
lc                712 drivers/md/dm-log.c 				lc->touched_dirtied = 0;
lc                714 drivers/md/dm-log.c 		lc->touched_cleaned = 0;
lc                722 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                723 drivers/md/dm-log.c 	log_clear_bit(lc, lc->clean_bits, region);
lc                728 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                729 drivers/md/dm-log.c 	if (likely(!lc->flush_failed))
lc                730 drivers/md/dm-log.c 		log_set_bit(lc, lc->clean_bits, region);
lc                735 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                737 drivers/md/dm-log.c 	if (lc->sync_search >= lc->region_count)
lc                741 drivers/md/dm-log.c 		*region = find_next_zero_bit_le(lc->sync_bits,
lc                742 drivers/md/dm-log.c 					     lc->region_count,
lc                743 drivers/md/dm-log.c 					     lc->sync_search);
lc                744 drivers/md/dm-log.c 		lc->sync_search = *region + 1;
lc                746 drivers/md/dm-log.c 		if (*region >= lc->region_count)
lc                749 drivers/md/dm-log.c 	} while (log_test_bit(lc->recovering_bits, *region));
lc                751 drivers/md/dm-log.c 	log_set_bit(lc, lc->recovering_bits, *region);
lc                758 drivers/md/dm-log.c 	struct log_c *lc = (struct log_c *) log->context;
lc                760 drivers/md/dm-log.c 	log_clear_bit(lc, lc->recovering_bits, region);
lc                762 drivers/md/dm-log.c 		log_set_bit(lc, lc->sync_bits, region);
lc                763 drivers/md/dm-log.c                 lc->sync_count++;
lc                764 drivers/md/dm-log.c         } else if (log_test_bit(lc->sync_bits, region)) {
lc                765 drivers/md/dm-log.c 		lc->sync_count--;
lc                766 drivers/md/dm-log.c 		log_clear_bit(lc, lc->sync_bits, region);
lc                772 drivers/md/dm-log.c         struct log_c *lc = (struct log_c *) log->context;
lc                774 drivers/md/dm-log.c         return lc->sync_count;
lc                778 drivers/md/dm-log.c 	if (lc->sync != DEFAULTSYNC) \
lc                779 drivers/md/dm-log.c 		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
lc                785 drivers/md/dm-log.c 	struct log_c *lc = log->context;
lc                794 drivers/md/dm-log.c 		       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
lc                805 drivers/md/dm-log.c 	struct log_c *lc = log->context;
lc                809 drivers/md/dm-log.c 		DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
lc                810 drivers/md/dm-log.c 		       lc->log_dev_flush_failed ? 'F' :
lc                811 drivers/md/dm-log.c 		       lc->log_dev_failed ? 'D' :
lc                817 drivers/md/dm-log.c 		       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
lc                818 drivers/md/dm-log.c 		       lc->region_size);
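
The drivers/md/dm-log.c hits above track mirror regions in uint32_t bitsets: clean_bits is padded out to whole words, the pad bits past region_count are forced clear, and sync_count is then a plain popcount over the array (memweight() in the kernel). A small, hedged userspace sketch of that bookkeeping, using illustrative sizes and native bit order rather than the kernel's little-endian bit ops:

/*
 * Hedged, userspace-only sketch of the dm-log bookkeeping above.
 * Names and sizes are illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_WORD (sizeof(uint32_t) * 8)

static unsigned count_bits(const uint32_t *bits, unsigned nwords)
{
	unsigned i, n = 0;

	for (i = 0; i < nwords; i++)
		n += __builtin_popcount(bits[i]);
	return n;
}

int main(void)
{
	unsigned region_count = 70;			/* not a multiple of 32 */
	unsigned nwords = (region_count + BITS_PER_WORD - 1) / BITS_PER_WORD;
	uint32_t clean_bits[3], sync_bits[3];
	unsigned i;

	memset(clean_bits, 0xff, sizeof(clean_bits));	/* pretend every region is clean */

	/* Clear the pad bits in the last word, mirroring the rounding
	 * loop in dm-log.c, so the popcount only sees real regions. */
	for (i = region_count; i % BITS_PER_WORD; i++)
		clean_bits[i / BITS_PER_WORD] &= ~(1u << (i % BITS_PER_WORD));

	memcpy(sync_bits, clean_bits, sizeof(sync_bits));
	printf("sync_count = %u (expect %u)\n",
	       count_bits(sync_bits, nwords), region_count);
	return 0;
}

Clearing the pad bits first is what lets a single popcount stand in for a per-region walk.
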
lc                341 drivers/net/ethernet/chelsio/cxgb/common.h int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
lc                637 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct link_config *lc = &p->link_config;
lc                643 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	if (!(lc->supported & SUPPORTED_Autoneg))
lc                650 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		if (!(lc->supported & cap) || (speed == SPEED_1000))
lc                652 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_speed = speed;
lc                653 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_duplex = cmd->base.duplex;
lc                654 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->advertising = 0;
lc                658 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			advertising = lc->supported;
lc                659 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		advertising &= lc->supported;
lc                662 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_speed = SPEED_INVALID;
lc                663 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_duplex = DUPLEX_INVALID;
lc                664 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->advertising = advertising | ADVERTISED_Autoneg;
lc                666 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	lc->autoneg = cmd->base.autoneg;
lc                668 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		t1_link_start(p->phy, p->mac, lc);
lc                688 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	struct link_config *lc = &p->link_config;
lc                691 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_fc = 0;
lc                692 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	else if (lc->supported & SUPPORTED_Autoneg)
lc                693 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_fc = PAUSE_AUTONEG;
lc                698 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_fc |= PAUSE_RX;
lc                700 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->requested_fc |= PAUSE_TX;
lc                701 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 	if (lc->autoneg == AUTONEG_ENABLE) {
lc                703 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 			t1_link_start(p->phy, p->mac, lc);
lc                705 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc                708 drivers/net/ethernet/chelsio/cxgb/cxgb2.c 							 lc->fc);
lc                154 drivers/net/ethernet/chelsio/cxgb/subr.c 	struct link_config *lc = &adapter->port[port_id].link_config;
lc                158 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
lc                159 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
lc                160 drivers/net/ethernet/chelsio/cxgb/subr.c 	if (!(lc->requested_fc & PAUSE_AUTONEG))
lc                161 drivers/net/ethernet/chelsio/cxgb/subr.c 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc                163 drivers/net/ethernet/chelsio/cxgb/subr.c 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
lc                168 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->fc = (unsigned char)fc;
lc                623 drivers/net/ethernet/chelsio/cxgb/subr.c int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
lc                625 drivers/net/ethernet/chelsio/cxgb/subr.c 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc                627 drivers/net/ethernet/chelsio/cxgb/subr.c 	if (lc->supported & SUPPORTED_Autoneg) {
lc                628 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
lc                632 drivers/net/ethernet/chelsio/cxgb/subr.c 				lc->advertising |= ADVERTISED_PAUSE;
lc                634 drivers/net/ethernet/chelsio/cxgb/subr.c 				lc->advertising |= ADVERTISED_ASYM_PAUSE;
lc                636 drivers/net/ethernet/chelsio/cxgb/subr.c 					lc->advertising |= ADVERTISED_PAUSE;
lc                639 drivers/net/ethernet/chelsio/cxgb/subr.c 		phy->ops->advertise(phy, lc->advertising);
lc                641 drivers/net/ethernet/chelsio/cxgb/subr.c 		if (lc->autoneg == AUTONEG_DISABLE) {
lc                642 drivers/net/ethernet/chelsio/cxgb/subr.c 			lc->speed = lc->requested_speed;
lc                643 drivers/net/ethernet/chelsio/cxgb/subr.c 			lc->duplex = lc->requested_duplex;
lc                644 drivers/net/ethernet/chelsio/cxgb/subr.c 			lc->fc = (unsigned char)fc;
lc                645 drivers/net/ethernet/chelsio/cxgb/subr.c 			mac->ops->set_speed_duplex_fc(mac, lc->speed,
lc                646 drivers/net/ethernet/chelsio/cxgb/subr.c 						      lc->duplex, fc);
lc                649 drivers/net/ethernet/chelsio/cxgb/subr.c 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
lc                658 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->fc = (unsigned char)fc;
lc               1030 drivers/net/ethernet/chelsio/cxgb/subr.c static void init_link_config(struct link_config *lc,
lc               1033 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->supported = bi->caps;
lc               1034 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->requested_speed = lc->speed = SPEED_INVALID;
lc               1035 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
lc               1036 drivers/net/ethernet/chelsio/cxgb/subr.c 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
lc               1037 drivers/net/ethernet/chelsio/cxgb/subr.c 	if (lc->supported & SUPPORTED_Autoneg) {
lc               1038 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->advertising = lc->supported;
lc               1039 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->autoneg = AUTONEG_ENABLE;
lc               1040 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->requested_fc |= PAUSE_AUTONEG;
lc               1042 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->advertising = 0;
lc               1043 drivers/net/ethernet/chelsio/cxgb/subr.c 		lc->autoneg = AUTONEG_DISABLE;
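
The cxgb and cxgb3 lines above resolve flow control in two steps: ethtool stores the user's wishes in requested_fc (optionally with PAUSE_AUTONEG), and on a link change the negotiated pause bits are either masked by that request or simply replaced by it. A hedged standalone sketch of that resolution rule, with illustrative flag values rather than the driver's:

/*
 * Hedged sketch of the pause resolution pattern seen in cxgb/cxgb3.
 * The flag values are illustrative, not the driver's definitions.
 */
#include <stdio.h>

#define PAUSE_RX      0x1
#define PAUSE_TX      0x2
#define PAUSE_AUTONEG 0x4

static unsigned int resolve_fc(unsigned int requested_fc,
			       unsigned int negotiated_fc)
{
	if (requested_fc & PAUSE_AUTONEG)
		return negotiated_fc & requested_fc & (PAUSE_RX | PAUSE_TX);
	return requested_fc & (PAUSE_RX | PAUSE_TX);
}

int main(void)
{
	/* autoneg on, user allows RX only, peer offers RX+TX -> RX */
	printf("%#x\n", resolve_fc(PAUSE_AUTONEG | PAUSE_RX,
				   PAUSE_RX | PAUSE_TX));
	/* autoneg off, user forces TX -> TX regardless of the peer */
	printf("%#x\n", resolve_fc(PAUSE_TX, 0));
	return 0;
}
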
lc                677 drivers/net/ethernet/chelsio/cxgb3/common.h int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
lc               1871 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct link_config *lc = &p->link_config;
lc               1877 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (!(lc->supported & SUPPORTED_Autoneg)) {
lc               1885 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			if (lc->supported & cap)
lc               1895 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		if (!(lc->supported & cap) || (speed == SPEED_1000))
lc               1897 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_speed = speed;
lc               1898 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_duplex = cmd->base.duplex;
lc               1899 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->advertising = 0;
lc               1902 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		advertising &= lc->supported;
lc               1905 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_speed = SPEED_INVALID;
lc               1906 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_duplex = DUPLEX_INVALID;
lc               1907 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->advertising = advertising | ADVERTISED_Autoneg;
lc               1909 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	lc->autoneg = cmd->base.autoneg;
lc               1911 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		t3_link_start(&p->phy, &p->mac, lc);
lc               1929 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	struct link_config *lc = &p->link_config;
lc               1932 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_fc = 0;
lc               1933 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	else if (lc->supported & SUPPORTED_Autoneg)
lc               1934 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_fc = PAUSE_AUTONEG;
lc               1939 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_fc |= PAUSE_RX;
lc               1941 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->requested_fc |= PAUSE_TX;
lc               1942 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 	if (lc->autoneg == AUTONEG_ENABLE) {
lc               1944 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_link_start(&p->phy, &p->mac, lc);
lc               1946 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc               1948 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c 			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
lc               1246 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	struct link_config *lc = &pi->link_config;
lc               1250 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (!lc->link_ok && link_ok) {
lc               1267 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (lc->requested_fc & PAUSE_AUTONEG)
lc               1268 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		fc &= lc->requested_fc;
lc               1270 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc               1272 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (link_ok == lc->link_ok && speed == lc->speed &&
lc               1273 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	    duplex == lc->duplex && fc == lc->fc)
lc               1276 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
lc               1283 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->link_ok = link_ok;
lc               1284 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
lc               1285 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
lc               1287 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
lc               1290 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->fc = fc;
lc               1302 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	struct link_config *lc = &pi->link_config;
lc               1320 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	link_ok = lc->link_ok;
lc               1321 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	speed = lc->speed;
lc               1322 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	duplex = lc->duplex;
lc               1323 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	fc = lc->fc;
lc               1328 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->link_ok = 0;
lc               1329 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->speed = SPEED_INVALID;
lc               1330 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->duplex = DUPLEX_INVALID;
lc               1343 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->link_ok = (unsigned char)link_ok;
lc               1344 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->speed = speed < 0 ? SPEED_INVALID : speed;
lc               1345 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
lc               1363 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
lc               1365 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
lc               1367 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->link_ok = 0;
lc               1368 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (lc->supported & SUPPORTED_Autoneg) {
lc               1369 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
lc               1371 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			lc->advertising |= ADVERTISED_Asym_Pause;
lc               1373 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 				lc->advertising |= ADVERTISED_Pause;
lc               1375 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		phy->ops->advertise(phy, lc->advertising);
lc               1377 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		if (lc->autoneg == AUTONEG_DISABLE) {
lc               1378 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			lc->speed = lc->requested_speed;
lc               1379 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			lc->duplex = lc->requested_duplex;
lc               1380 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			lc->fc = (unsigned char)fc;
lc               1381 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
lc               1384 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
lc               1389 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->fc = (unsigned char)fc;
lc               3492 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c static void init_link_config(struct link_config *lc, unsigned int caps)
lc               3494 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->supported = caps;
lc               3495 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->requested_speed = lc->speed = SPEED_INVALID;
lc               3496 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
lc               3497 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
lc               3498 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 	if (lc->supported & SUPPORTED_Autoneg) {
lc               3499 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->advertising = lc->supported;
lc               3500 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->autoneg = AUTONEG_ENABLE;
lc               3501 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->requested_fc |= PAUSE_AUTONEG;
lc               3503 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->advertising = 0;
lc               3504 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c 		lc->autoneg = AUTONEG_DISABLE;
lc               1589 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 			      struct link_config *lc);
lc               1591 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 		       unsigned int port, struct link_config *lc,
lc               1595 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 				unsigned int port, struct link_config *lc)
lc               1597 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	return t4_link_l1cfg_core(adapter, mbox, port, lc,
lc               1602 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 				   unsigned int port, struct link_config *lc)
lc               1604 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	return t4_link_l1cfg_core(adapter, mbox, port, lc,
lc                651 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct link_config *lc = &pi->link_cfg;
lc                661 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	old_lc = *lc;
lc                662 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
lc                667 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		if (!(lc->pcaps & fw_caps))
lc                670 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->speed_caps = fw_caps;
lc                671 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->acaps = fw_caps;
lc                675 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		if (!(lc->pcaps & fw_caps))
lc                677 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->speed_caps = 0;
lc                678 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
lc                680 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	lc->autoneg = base->autoneg;
lc                685 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
lc                687 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		*lc = old_lc;
lc                749 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	const struct link_config *lc = &pi->link_cfg;
lc                755 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
lc                762 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	fec->active_fec = cc_to_eth_fec(lc->fec);
lc                770 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct link_config *lc = &pi->link_cfg;
lc                777 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	old_lc = *lc;
lc                782 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	lc->requested_fec = eth_to_cc_fec(fec->fec);
lc                784 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 			    pi->tx_chan, lc);
lc                786 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		*lc = old_lc;
lc                804 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	struct link_config *lc = &p->link_cfg;
lc                807 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->requested_fc = 0;
lc                808 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	else if (lc->pcaps & FW_PORT_CAP32_ANEG)
lc                809 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->requested_fc = PAUSE_AUTONEG;
lc                814 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->requested_fc |= PAUSE_RX;
lc                816 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		lc->requested_fc |= PAUSE_TX;
lc                819 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 				     lc);
lc               5061 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static inline bool is_x_10g_port(const struct link_config *lc)
lc               5065 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
lc               4139 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			      struct link_config *lc)
lc               4145 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
lc               4150 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
lc               4159 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (lc->requested_fec & FEC_AUTO)
lc               4160 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
lc               4162 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		cc_fec = lc->requested_fec;
lc               4169 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
lc               4170 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		acaps = lc->acaps | fw_fc | fw_fec;
lc               4171 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc               4172 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fec = cc_fec;
lc               4173 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	} else if (lc->autoneg == AUTONEG_DISABLE) {
lc               4174 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc               4175 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc               4176 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fec = cc_fec;
lc               4178 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
lc               4189 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
lc               4191 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			acaps, lc->pcaps);
lc               4216 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		       unsigned int port, struct link_config *lc,
lc               4224 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
lc               4225 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	    lc->autoneg == AUTONEG_ENABLE) {
lc               4232 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	rcap = t4_link_acaps(adapter, port, lc);
lc               8568 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	struct link_config *lc = &pi->link_cfg;
lc               8623 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->new_module = false;
lc               8624 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->redo_l1cfg = false;
lc               8635 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->pcaps = pcaps;
lc               8645 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->def_acaps = acaps;
lc               8666 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->new_module = t4_is_inserted_mod_type(mod_type);
lc               8671 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (link_ok != lc->link_ok || speed != lc->speed ||
lc               8672 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	    fc != lc->fc || adv_fc != lc->advertised_fc ||
lc               8673 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	    fec != lc->fec) {
lc               8675 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		if (!link_ok && lc->link_ok) {
lc               8676 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->link_down_rc = linkdnrc;
lc               8682 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->link_ok = link_ok;
lc               8683 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->speed = speed;
lc               8684 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->advertised_fc = adv_fc;
lc               8685 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fc = fc;
lc               8686 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->fec = fec;
lc               8688 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->lpacaps = lpacaps;
lc               8689 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->acaps = acaps & ADVERT_MASK;
lc               8696 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
lc               8697 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->autoneg = AUTONEG_DISABLE;
lc               8698 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
lc               8699 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->autoneg = AUTONEG_ENABLE;
lc               8705 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->acaps = 0;
lc               8706 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->speed_caps = fwcap_to_fwspeed(acaps);
lc               8707 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			lc->autoneg = AUTONEG_DISABLE;
lc               8717 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (lc->new_module && lc->redo_l1cfg) {
lc               8726 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		old_lc = *lc;
lc               8727 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
lc               8729 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 			*lc = old_lc;
lc               8734 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->new_module = false;
lc               8735 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->redo_l1cfg = false;
lc               8890 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
lc               8893 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->pcaps = pcaps;
lc               8894 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->def_acaps = acaps;
lc               8895 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->lpacaps = 0;
lc               8896 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->speed_caps = 0;
lc               8897 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->speed = 0;
lc               8898 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
lc               8903 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->requested_fec = FEC_AUTO;
lc               8904 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
lc               8913 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
lc               8914 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->acaps = lc->pcaps & ADVERT_MASK;
lc               8915 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->autoneg = AUTONEG_ENABLE;
lc               8916 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->requested_fc |= PAUSE_AUTONEG;
lc               8918 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->acaps = 0;
lc               8919 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->autoneg = AUTONEG_DISABLE;
lc               8920 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 		lc->speed_caps = fwcap_to_fwspeed(acaps);
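
In the cxgb4 t4_hw.c lines above, t4_link_acaps() builds the capability word handed to firmware from link_config: ports without autoneg advertise acaps as-is, autoneg-off ports request a single forced speed, and autoneg-on ports request the full advertising mask plus auto-MDI. A hedged sketch of that three-way selection; the struct and every constant below are stand-ins, not the driver's definitions:

/*
 * Hedged sketch of the capability-word assembly in t4_link_acaps().
 * All names and bit values here are illustrative.
 */
#include <stdio.h>

#define CAP_ANEG	0x1
#define CAP_MDI_AUTO	0x2
#define AUTONEG_DISABLE	0
#define AUTONEG_ENABLE	1

struct linkcfg {
	unsigned int pcaps;		/* what the port can do */
	unsigned int acaps;		/* what we advertise */
	unsigned int speed_caps;	/* single forced speed */
	int autoneg;
};

static unsigned int build_rcap(const struct linkcfg *lc,
			       unsigned int fw_fc, unsigned int fw_fec)
{
	if (!(lc->pcaps & CAP_ANEG))
		return lc->acaps | fw_fc | fw_fec;
	if (lc->autoneg == AUTONEG_DISABLE)
		return lc->speed_caps | fw_fc | fw_fec | CAP_MDI_AUTO;
	return lc->acaps | fw_fc | fw_fec | CAP_MDI_AUTO;
}

int main(void)
{
	struct linkcfg lc = {
		.pcaps = CAP_ANEG | 0xf0, .acaps = 0xf0,
		.speed_caps = 0x40, .autoneg = AUTONEG_DISABLE,
	};

	printf("requested caps: %#x\n", build_rcap(&lc, 0x100, 0x200));
	return 0;
}
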
lc               1533 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	const struct link_config *lc = &pi->link_cfg;
lc               1539 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
lc               1546 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c 	fec->active_fec = cc_to_eth_fec(lc->fec);
lc                153 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h static inline bool is_x_10g_port(const struct link_config *lc)
lc                157 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h 	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
lc                457 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c static void init_link_config(struct link_config *lc,
lc                461 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->pcaps = pcaps;
lc                462 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->lpacaps = 0;
lc                463 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->speed_caps = 0;
lc                464 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->speed = 0;
lc                465 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
lc                470 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->auto_fec = fwcap_to_cc_fec(acaps);
lc                471 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->requested_fec = FEC_AUTO;
lc                472 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	lc->fec = lc->auto_fec;
lc                481 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
lc                482 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->acaps = acaps & ADVERT_MASK;
lc                483 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->autoneg = AUTONEG_ENABLE;
lc                484 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->requested_fc |= PAUSE_AUTONEG;
lc                486 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->acaps = 0;
lc                487 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->autoneg = AUTONEG_DISABLE;
lc                488 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->speed_caps = fwcap_to_fwspeed(acaps);
lc               1917 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	struct link_config *lc = &pi->link_cfg;
lc               1997 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->auto_fec = fec;
lc               2015 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	if (link_ok != lc->link_ok || speed != lc->speed ||
lc               2016 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	    fc != lc->fc || adv_fc != lc->advertised_fc ||
lc               2017 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 	    fec != lc->fec) {
lc               2019 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		if (!link_ok && lc->link_ok) {
lc               2020 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->link_down_rc = linkdnrc;
lc               2026 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->link_ok = link_ok;
lc               2027 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->speed = speed;
lc               2028 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->advertised_fc = adv_fc;
lc               2029 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->fc = fc;
lc               2030 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->fec = fec;
lc               2032 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->pcaps = pcaps;
lc               2033 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->lpacaps = lpacaps;
lc               2034 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		lc->acaps = acaps & ADVERT_MASK;
lc               2041 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
lc               2042 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->autoneg = AUTONEG_DISABLE;
lc               2043 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
lc               2044 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->autoneg = AUTONEG_ENABLE;
lc               2050 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->acaps = 0;
lc               2051 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->speed_caps = fwcap_to_speed(acaps);
lc               2052 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 			lc->autoneg = AUTONEG_DISABLE;
lc                482 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	int i, j, lc, good_cnt, ret_val = 0;
lc                499 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	lc = 1;
lc                500 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	for (j = 0; j < lc; j++) {
lc               1384 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	int i, j, k, l, lc, good_cnt, ret_val = 0;
lc               1395 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		lc = ((txdr->count / 64) * 2) + 1;
lc               1397 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 		lc = ((rxdr->count / 64) * 2) + 1;
lc               1400 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	for (j = 0; j <= lc; j++) { /* loop count loop */
lc               1633 drivers/net/ethernet/intel/e1000e/ethtool.c 	int lc;
lc               1646 drivers/net/ethernet/intel/e1000e/ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
lc               1648 drivers/net/ethernet/intel/e1000e/ethtool.c 		lc = ((rx_ring->count / 64) * 2) + 1;
lc               1653 drivers/net/ethernet/intel/e1000e/ethtool.c 	for (j = 0; j <= lc; j++) {
lc               1874 drivers/net/ethernet/intel/igb/igb_ethtool.c 	u16 i, j, lc, good_cnt;
lc               1895 drivers/net/ethernet/intel/igb/igb_ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
lc               1897 drivers/net/ethernet/intel/igb/igb_ethtool.c 		lc = ((rx_ring->count / 64) * 2) + 1;
lc               1899 drivers/net/ethernet/intel/igb/igb_ethtool.c 	for (j = 0; j <= lc; j++) { /* loop count loop */
lc               1974 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	int i, j, lc, good_cnt, ret_val = 0;
lc               1999 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		lc = ((tx_ring->count / 64) * 2) + 1;
lc               2001 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		lc = ((rx_ring->count / 64) * 2) + 1;
lc               2003 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	for (j = 0; j <= lc; j++) {
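
The e1000/e1000e/igb/ixgbe ethtool hits above all size their loopback self-test the same way: divide a ring into 64-frame bursts and loop over roughly twice that many bursts, plus one. A small arithmetic-only demo of that loop count; the choice of the larger of the two rings is inferred from the surrounding driver code, which is not part of this listing:

/*
 * Hedged demo of the loopback-test iteration count used by the Intel
 * ethtool self-tests above.  Pure arithmetic, no driver state.
 */
#include <stdio.h>

static int loop_count(unsigned int tx_count, unsigned int rx_count)
{
	if (rx_count <= tx_count)
		return ((tx_count / 64) * 2) + 1;
	return ((rx_count / 64) * 2) + 1;
}

int main(void)
{
	printf("tx=256  rx=256  -> %d iterations\n", loop_count(256, 256));
	printf("tx=4096 rx=512  -> %d iterations\n", loop_count(4096, 512));
	return 0;
}
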
lc               2211 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	union ionic_lif_config *lc;
lc               2220 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	lc = &ident->lif.eth.config;
lc               2223 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
lc               2224 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
lc               2225 drivers/net/ethernet/pensando/ionic/ionic_lif.c 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
lc                629 drivers/perf/qcom_l3_pmu.c 	L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
lc               2167 drivers/pinctrl/tegra/pinctrl-tegra20.c 	PULL_PG(lc,      0xac, 22),
lc                 78 drivers/s390/char/tape_3590.h 	unsigned int lc:3;
lc               1688 drivers/scsi/csiostor/csio_hw.c static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
lc               1691 drivers/scsi/csiostor/csio_hw.c 	lc->pcaps = pcaps;
lc               1692 drivers/scsi/csiostor/csio_hw.c 	lc->def_acaps = acaps;
lc               1693 drivers/scsi/csiostor/csio_hw.c 	lc->lpacaps = 0;
lc               1694 drivers/scsi/csiostor/csio_hw.c 	lc->speed_caps = 0;
lc               1695 drivers/scsi/csiostor/csio_hw.c 	lc->speed = 0;
lc               1696 drivers/scsi/csiostor/csio_hw.c 	lc->requested_fc = PAUSE_RX | PAUSE_TX;
lc               1697 drivers/scsi/csiostor/csio_hw.c 	lc->fc = lc->requested_fc;
lc               1703 drivers/scsi/csiostor/csio_hw.c 	lc->requested_fec = FEC_AUTO;
lc               1704 drivers/scsi/csiostor/csio_hw.c 	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
lc               1713 drivers/scsi/csiostor/csio_hw.c 	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
lc               1714 drivers/scsi/csiostor/csio_hw.c 		lc->acaps = lc->pcaps & ADVERT_MASK;
lc               1715 drivers/scsi/csiostor/csio_hw.c 		lc->autoneg = AUTONEG_ENABLE;
lc               1716 drivers/scsi/csiostor/csio_hw.c 		lc->requested_fc |= PAUSE_AUTONEG;
lc               1718 drivers/scsi/csiostor/csio_hw.c 		lc->acaps = 0;
lc               1719 drivers/scsi/csiostor/csio_hw.c 		lc->autoneg = AUTONEG_DISABLE;
lc               1723 drivers/scsi/csiostor/csio_hw.c static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
lc               1729 drivers/scsi/csiostor/csio_hw.c 	lc->link_ok = 0;
lc               1735 drivers/scsi/csiostor/csio_hw.c 	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
lc               1745 drivers/scsi/csiostor/csio_hw.c 	if (lc->requested_fec & FEC_AUTO)
lc               1746 drivers/scsi/csiostor/csio_hw.c 		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
lc               1748 drivers/scsi/csiostor/csio_hw.c 		cc_fec = lc->requested_fec;
lc               1755 drivers/scsi/csiostor/csio_hw.c 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
lc               1756 drivers/scsi/csiostor/csio_hw.c 		lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
lc               1757 drivers/scsi/csiostor/csio_hw.c 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc               1758 drivers/scsi/csiostor/csio_hw.c 		lc->fec = cc_fec;
lc               1759 drivers/scsi/csiostor/csio_hw.c 	} else if (lc->autoneg == AUTONEG_DISABLE) {
lc               1760 drivers/scsi/csiostor/csio_hw.c 		lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc               1761 drivers/scsi/csiostor/csio_hw.c 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc               1762 drivers/scsi/csiostor/csio_hw.c 		lc->fec = cc_fec;
lc               1764 drivers/scsi/csiostor/csio_hw.c 		lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
lc                193 drivers/video/fbdev/matrox/matroxfb_misc.c 	unsigned int vd, vs, ve, vt, lc;
lc                272 drivers/video/fbdev/matrox/matroxfb_misc.c 	lc = vd;
lc                300 drivers/video/fbdev/matrox/matroxfb_misc.c 			  ((lc & 0x400) >>  3);
lc                315 drivers/video/fbdev/matrox/matroxfb_misc.c 		      ((lc & 0x100) >> 4) |
lc                321 drivers/video/fbdev/matrox/matroxfb_misc.c 		      ((lc & 0x200) >> 3);
lc                334 drivers/video/fbdev/matrox/matroxfb_misc.c 	hw->CRTC[24] = lc;
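
The matroxfb_misc.c lines above scatter the CRTC line-compare value in the usual VGA fashion: bits 7:0 land in CRTC register 0x18 (index 24 in the array), bit 8 in bit 4 of the overflow register, bit 9 in bit 6 of the maximum-scan-line register, and the Matrox extension register carries bit 10. A hedged demo of just the bit split, with the register destinations given only as comments:

/*
 * Hedged demo of the line-compare bit split done in matroxfb_misc.c.
 * Only the shifts are demonstrated; no hardware registers are touched.
 */
#include <stdio.h>

int main(void)
{
	unsigned int lc = 0x5a7;			/* 11-bit line-compare value */
	unsigned char crtc18    = lc & 0xff;		/* -> CRTC index 0x18 (24)      */
	unsigned char ovfl_bit4 = (lc & 0x100) >> 4;	/* -> overflow reg, bit 4       */
	unsigned char msl_bit6  = (lc & 0x200) >> 3;	/* -> max scan line reg, bit 6  */
	unsigned char ext_bit7  = (lc & 0x400) >> 3;	/* -> Matrox extension, bit 7   */

	printf("CRTC18=%#x ovfl=%#x msl=%#x ext=%#x\n",
	       crtc18, ovfl_bit4, msl_bit6, ext_bit7);
	return 0;
}
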
lc                321 fs/affs/namei.c 	char			 c, lc;
lc                343 fs/affs/namei.c 	lc = '/';
lc                354 fs/affs/namei.c 		if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
lc                358 fs/affs/namei.c 			lc = '/';
lc                359 fs/affs/namei.c 		} else if (c == '.' && lc == '/' && *symname == '/') {
lc                361 fs/affs/namei.c 			lc = '/';
lc                364 fs/affs/namei.c 			lc   = c;
lc                367 fs/affs/namei.c 		if (lc == '/')
lc                 22 fs/affs/symlink.c 	char			 lc;
lc                 32 fs/affs/symlink.c 	lc = 0;
lc                 47 fs/affs/symlink.c 		lc = '/';
lc                 50 fs/affs/symlink.c 		if (c == '/' && lc == '/' && i < 1020) {	/* parent dir */
lc                 55 fs/affs/symlink.c 		lc = c;
lc               3515 fs/cifs/smb2ops.c 	struct create_lease *lc = (struct create_lease *)buf;
lc               3518 fs/cifs/smb2ops.c 	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
lc               3520 fs/cifs/smb2ops.c 	return le32_to_cpu(lc->lcontext.LeaseState);
lc               3526 fs/cifs/smb2ops.c 	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
lc               3528 fs/cifs/smb2ops.c 	*epoch = le16_to_cpu(lc->lcontext.Epoch);
lc               3529 fs/cifs/smb2ops.c 	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
lc               3532 fs/cifs/smb2ops.c 		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lc               3533 fs/cifs/smb2ops.c 	return le32_to_cpu(lc->lcontext.LeaseState);
lc                 71 fs/hpfs/dir.c  	int lc;
lc                112 fs/hpfs/dir.c  	lc = hpfs_sb(inode->i_sb)->sb_lowercase;
lc                172 fs/hpfs/dir.c  		tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
lc                 55 fs/hpfs/name.c 			  unsigned len, int lc, int lng)
lc                 66 fs/hpfs/name.c 	if (!lc) return from;
lc                817 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                818 fs/ocfs2/stack_user.c 	complete(&lc->oc_sync_wait);
lc                825 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                834 fs/ocfs2/stack_user.c 	wait_for_completion(&lc->oc_sync_wait);
lc                849 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                860 fs/ocfs2/stack_user.c 	wait_for_completion(&lc->oc_sync_wait);
lc                876 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                878 fs/ocfs2/stack_user.c 			&lc->oc_version_lksb, VERSION_LOCK);
lc                883 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                884 fs/ocfs2/stack_user.c 	return sync_unlock(conn, &lc->oc_version_lksb, VERSION_LOCK);
lc                901 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                909 fs/ocfs2/stack_user.c 	lc->oc_version_lksb.sb_lvbptr = lc->oc_lvb;
lc                915 fs/ocfs2/stack_user.c 		version_to_lvb(&running_proto, lc->oc_lvb);
lc                921 fs/ocfs2/stack_user.c 		lvb_to_version(lc->oc_lvb, &pv);
lc                954 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc                959 fs/ocfs2/stack_user.c 			atomic_set(&lc->oc_this_node, slots[i].nodeid);
lc                963 fs/ocfs2/stack_user.c 	lc->oc_our_slot = our_slot;
lc                964 fs/ocfs2/stack_user.c 	wake_up(&lc->oc_wait);
lc                986 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc;
lc                991 fs/ocfs2/stack_user.c 	lc = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
lc                992 fs/ocfs2/stack_user.c 	if (!lc)
lc                995 fs/ocfs2/stack_user.c 	init_waitqueue_head(&lc->oc_wait);
lc                996 fs/ocfs2/stack_user.c 	init_completion(&lc->oc_sync_wait);
lc                997 fs/ocfs2/stack_user.c 	atomic_set(&lc->oc_this_node, 0);
lc                998 fs/ocfs2/stack_user.c 	conn->cc_private = lc;
lc                999 fs/ocfs2/stack_user.c 	lc->oc_type = NO_CONTROLD;
lc               1015 fs/ocfs2/stack_user.c 		lc->oc_type = WITH_CONTROLD;
lc               1025 fs/ocfs2/stack_user.c 	rc = ocfs2_live_connection_attach(conn, lc);
lc               1029 fs/ocfs2/stack_user.c 	if (lc->oc_type == NO_CONTROLD) {
lc               1037 fs/ocfs2/stack_user.c 		wait_event(lc->oc_wait, (atomic_read(&lc->oc_this_node) > 0));
lc               1051 fs/ocfs2/stack_user.c 		ocfs2_live_connection_drop(lc);
lc               1052 fs/ocfs2/stack_user.c 		lc = NULL;
lc               1057 fs/ocfs2/stack_user.c 		kfree(lc);
lc               1066 fs/ocfs2/stack_user.c 	struct ocfs2_live_connection *lc = conn->cc_private;
lc               1068 fs/ocfs2/stack_user.c 	if (lc->oc_type == WITH_CONTROLD)
lc               1070 fs/ocfs2/stack_user.c 	else if (lc->oc_type == NO_CONTROLD)
lc               1071 fs/ocfs2/stack_user.c 		rc = atomic_read(&lc->oc_this_node);
lc                242 include/linux/lru_cache.h extern void lc_reset(struct lru_cache *lc);
lc                243 include/linux/lru_cache.h extern void lc_destroy(struct lru_cache *lc);
lc                244 include/linux/lru_cache.h extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
lc                245 include/linux/lru_cache.h extern void lc_del(struct lru_cache *lc, struct lc_element *element);
lc                247 include/linux/lru_cache.h extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
lc                248 include/linux/lru_cache.h extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
lc                249 include/linux/lru_cache.h extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
lc                250 include/linux/lru_cache.h extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
lc                251 include/linux/lru_cache.h extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
lc                252 include/linux/lru_cache.h extern void lc_committed(struct lru_cache *lc);
lc                255 include/linux/lru_cache.h extern void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
lc                257 include/linux/lru_cache.h extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
lc                268 include/linux/lru_cache.h static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
lc                270 include/linux/lru_cache.h 	return !test_and_set_bit(__LC_LOCKED, &lc->flags);
lc                282 include/linux/lru_cache.h extern int lc_try_lock(struct lru_cache *lc);
lc                288 include/linux/lru_cache.h static inline void lc_unlock(struct lru_cache *lc)
lc                290 include/linux/lru_cache.h 	clear_bit(__LC_DIRTY, &lc->flags);
lc                291 include/linux/lru_cache.h 	clear_bit_unlock(__LC_LOCKED, &lc->flags);
lc                294 include/linux/lru_cache.h extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
lc                299 include/linux/lru_cache.h extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
lc                300 include/linux/lru_cache.h extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
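
The include/linux/lru_cache.h declarations above form a small fixed-size cache of labelled elements (DRBD's activity log and resync bitmap are the main users). What follows is a hedged sketch of the usual lc_get()/lc_committed()/lc_put() cycle only: lc_create() and the caller's own spinlock around these calls are deliberately omitted, so this illustrates call order rather than being a buildable module.

/*
 * Hedged sketch of the lru_cache call sequence, assuming "cache" was
 * created elsewhere and that the caller serializes access itself.
 */
#include <linux/types.h>
#include <linux/lru_cache.h>

static bool touch_extent(struct lru_cache *cache, unsigned int enr)
{
	struct lc_element *e;

	e = lc_get(cache, enr);		/* hit, or prepare a label change */
	if (!e)
		return false;		/* starving, locked, or too many pending changes */

	if (cache->pending_changes && lc_try_lock_for_transaction(cache)) {
		/* ...write the prepared element labels to stable storage... */
		lc_committed(cache);	/* prepared elements move to the in-use list */
		lc_unlock(cache);
	}

	/* ...do the work covered by this cache element... */

	lc_put(cache, e);		/* drop the reference; element becomes evictable */
	return true;
}
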
lc                350 lib/decompress_unlzma.c 				     int lc, uint32_t literal_pos_mask) {
lc                355 lib/decompress_unlzma.c 		 * (((get_pos(wr) & literal_pos_mask) << lc)
lc                356 lib/decompress_unlzma.c 		    + (wr->previous_byte >> (8 - lc))))
lc                546 lib/decompress_unlzma.c 	int lc, pb, lp;
lc                592 lib/decompress_unlzma.c 	lc = header.pos;
lc                593 lib/decompress_unlzma.c 	while (lc >= 9) {
lc                595 lib/decompress_unlzma.c 		lc -= 9;
lc                621 lib/decompress_unlzma.c 	num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
lc                625 lib/decompress_unlzma.c 	num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
lc                637 lib/decompress_unlzma.c 					lc, literal_pos_mask)) {
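
lib/decompress_unlzma.c above peels lc, lp and pb out of the single LZMA properties byte and later picks a literal probability table from the top lc bits of the previous byte plus the low lp bits of the output position. A standalone arithmetic sketch of both steps:

/*
 * Hedged sketch of LZMA property decoding and literal-state selection.
 * Pure arithmetic; no decoder state is modelled.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t props = 0x5d;		/* the common default: lc=3, lp=0, pb=2 */
	unsigned int lc = props % 9;
	unsigned int lp = (props / 9) % 5;
	unsigned int pb = (props / 9) / 5;
	unsigned int literal_pos_mask = (1u << lp) - 1;

	uint32_t pos = 12345;		/* current output position */
	uint8_t prev_byte = 0xe8;	/* last byte written */
	unsigned int state = ((pos & literal_pos_mask) << lc)
			     + (prev_byte >> (8 - lc));

	printf("lc=%u lp=%u pb=%u\n", lc, lp, pb);
	printf("literal subcoder %u of %u\n", state, 1u << (lc + lp));
	return 0;
}

The total size of the literal table scales the same way as the kernel's LZMA_LIT_SIZE << (lc + lp) allocation above: one fixed-size probability block per (lc + lp)-bit context.
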
lc                 57 lib/kstrtox.c  		unsigned int lc = c | 0x20; /* don't tolower() this line */
lc                 62 lib/kstrtox.c  		else if ('a' <= lc && lc <= 'f')
lc                 63 lib/kstrtox.c  			val = lc - 'a' + 10;
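
lib/kstrtox.c above folds case with a plain OR 0x20 instead of tolower(), so a single range check covers both 'A'-'F' and 'a'-'f'. A standalone re-creation of that trick, for illustration only:

/*
 * Hedged sketch of the kstrtox.c case-folding hex-digit parse.
 */
#include <stdio.h>

static int hex_val(char c)
{
	unsigned int lc = c | 0x20;	/* 'A'..'F' -> 'a'..'f' */

	if ('0' <= c && c <= '9')
		return c - '0';
	if ('a' <= lc && lc <= 'f')
		return lc - 'a' + 10;
	return -1;			/* not a hex digit */
}

int main(void)
{
	printf("%d %d %d %d\n", hex_val('7'), hex_val('c'),
	       hex_val('F'), hex_val('z'));	/* 7 12 15 -1 */
	return 0;
}
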
lc                 29 lib/lru_cache.c 	BUG_ON(!lc);			\
lc                 30 lib/lru_cache.c 	BUG_ON(!lc->nr_elements);	\
lc                 31 lib/lru_cache.c 	BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
lc                 35 lib/lru_cache.c 	clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
lc                 39 lib/lru_cache.c #define PARANOIA_LC_ELEMENT(lc, e) do {	\
lc                 40 lib/lru_cache.c 	struct lru_cache *lc_ = (lc);	\
lc                 55 lib/lru_cache.c int lc_try_lock(struct lru_cache *lc)
lc                 59 lib/lru_cache.c 		val = cmpxchg(&lc->flags, 0, LC_LOCKED);
lc                 68 lib/lru_cache.c 		old = lc->flags & LC_PARANOIA;
lc                 70 lib/lru_cache.c 		val = cmpxchg(&lc->flags, old, new);
lc                 93 lib/lru_cache.c 	struct lru_cache *lc;
lc                114 lib/lru_cache.c 	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
lc                115 lib/lru_cache.c 	if (!lc)
lc                118 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->in_use);
lc                119 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->lru);
lc                120 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->free);
lc                121 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->to_be_changed);
lc                123 lib/lru_cache.c 	lc->name = name;
lc                124 lib/lru_cache.c 	lc->element_size = e_size;
lc                125 lib/lru_cache.c 	lc->element_off = e_off;
lc                126 lib/lru_cache.c 	lc->nr_elements = e_count;
lc                127 lib/lru_cache.c 	lc->max_pending_changes = max_pending_changes;
lc                128 lib/lru_cache.c 	lc->lc_cache = cache;
lc                129 lib/lru_cache.c 	lc->lc_element = element;
lc                130 lib/lru_cache.c 	lc->lc_slot = slot;
lc                137 lib/lru_cache.c 		memset(p, 0, lc->element_size);
lc                142 lib/lru_cache.c 		list_add(&e->list, &lc->free);
lc                146 lib/lru_cache.c 		return lc;
lc                153 lib/lru_cache.c 	kfree(lc);
lc                160 lib/lru_cache.c static void lc_free_by_index(struct lru_cache *lc, unsigned i)
lc                162 lib/lru_cache.c 	void *p = lc->lc_element[i];
lc                165 lib/lru_cache.c 		p -= lc->element_off;
lc                166 lib/lru_cache.c 		kmem_cache_free(lc->lc_cache, p);
lc                174 lib/lru_cache.c void lc_destroy(struct lru_cache *lc)
lc                177 lib/lru_cache.c 	if (!lc)
lc                179 lib/lru_cache.c 	for (i = 0; i < lc->nr_elements; i++)
lc                180 lib/lru_cache.c 		lc_free_by_index(lc, i);
lc                181 lib/lru_cache.c 	kfree(lc->lc_element);
lc                182 lib/lru_cache.c 	kfree(lc->lc_slot);
lc                183 lib/lru_cache.c 	kfree(lc);
lc                193 lib/lru_cache.c void lc_reset(struct lru_cache *lc)
lc                197 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->in_use);
lc                198 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->lru);
lc                199 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->free);
lc                200 lib/lru_cache.c 	INIT_LIST_HEAD(&lc->to_be_changed);
lc                201 lib/lru_cache.c 	lc->used = 0;
lc                202 lib/lru_cache.c 	lc->hits = 0;
lc                203 lib/lru_cache.c 	lc->misses = 0;
lc                204 lib/lru_cache.c 	lc->starving = 0;
lc                205 lib/lru_cache.c 	lc->locked = 0;
lc                206 lib/lru_cache.c 	lc->changed = 0;
lc                207 lib/lru_cache.c 	lc->pending_changes = 0;
lc                208 lib/lru_cache.c 	lc->flags = 0;
lc                209 lib/lru_cache.c 	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
lc                211 lib/lru_cache.c 	for (i = 0; i < lc->nr_elements; i++) {
lc                212 lib/lru_cache.c 		struct lc_element *e = lc->lc_element[i];
lc                214 lib/lru_cache.c 		p -= lc->element_off;
lc                215 lib/lru_cache.c 		memset(p, 0, lc->element_size);
lc                220 lib/lru_cache.c 		list_add(&e->list, &lc->free);
lc                229 lib/lru_cache.c void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
lc                239 lib/lru_cache.c 		   lc->name, lc->used, lc->nr_elements,
lc                240 lib/lru_cache.c 		   lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
lc                243 lib/lru_cache.c static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
lc                245 lib/lru_cache.c 	return  lc->lc_slot + (enr % lc->nr_elements);
lc                249 lib/lru_cache.c static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
lc                254 lib/lru_cache.c 	BUG_ON(!lc);
lc                255 lib/lru_cache.c 	BUG_ON(!lc->nr_elements);
lc                256 lib/lru_cache.c 	hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
lc                280 lib/lru_cache.c struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
lc                282 lib/lru_cache.c 	return __lc_find(lc, enr, 0);
lc                295 lib/lru_cache.c bool lc_is_used(struct lru_cache *lc, unsigned int enr)
lc                297 lib/lru_cache.c 	struct lc_element *e = __lc_find(lc, enr, 1);
lc                309 lib/lru_cache.c void lc_del(struct lru_cache *lc, struct lc_element *e)
lc                312 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
lc                317 lib/lru_cache.c 	list_move(&e->list, &lc->free);
lc                321 lib/lru_cache.c static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
lc                326 lib/lru_cache.c 	if (!list_empty(&lc->free))
lc                327 lib/lru_cache.c 		n = lc->free.next;
lc                328 lib/lru_cache.c 	else if (!list_empty(&lc->lru))
lc                329 lib/lru_cache.c 		n = lc->lru.prev;
lc                334 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
lc                339 lib/lru_cache.c 	hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
lc                340 lib/lru_cache.c 	list_move(&e->list, &lc->to_be_changed);
lc                345 lib/lru_cache.c static int lc_unused_element_available(struct lru_cache *lc)
lc                347 lib/lru_cache.c 	if (!list_empty(&lc->free))
lc                349 lib/lru_cache.c 	if (!list_empty(&lc->lru))
lc                361 lib/lru_cache.c static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
lc                366 lib/lru_cache.c 	if (lc->flags & LC_STARVING) {
lc                367 lib/lru_cache.c 		++lc->starving;
lc                371 lib/lru_cache.c 	e = __lc_find(lc, enr, 1);
lc                387 lib/lru_cache.c 			++lc->hits;
lc                391 lib/lru_cache.c 		++lc->hits;
lc                393 lib/lru_cache.c 			lc->used++;
lc                394 lib/lru_cache.c 		list_move(&e->list, &lc->in_use); /* Not evictable... */
lc                399 lib/lru_cache.c 	++lc->misses;
lc                405 lib/lru_cache.c 	test_and_set_bit(__LC_DIRTY, &lc->flags);
lc                410 lib/lru_cache.c 	if (test_bit(__LC_LOCKED, &lc->flags)) {
lc                411 lib/lru_cache.c 		++lc->locked;
lc                418 lib/lru_cache.c 	if (!lc_unused_element_available(lc)) {
lc                419 lib/lru_cache.c 		__set_bit(__LC_STARVING, &lc->flags);
lc                426 lib/lru_cache.c 	if (lc->pending_changes >= lc->max_pending_changes)
lc                429 lib/lru_cache.c 	e = lc_prepare_for_change(lc, enr);
lc                432 lib/lru_cache.c 	clear_bit(__LC_STARVING, &lc->flags);
lc                434 lib/lru_cache.c 	lc->used++;
lc                435 lib/lru_cache.c 	lc->pending_changes++;
lc                480 lib/lru_cache.c struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
lc                482 lib/lru_cache.c 	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
lc                500 lib/lru_cache.c struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
lc                502 lib/lru_cache.c 	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
lc                521 lib/lru_cache.c struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
lc                523 lib/lru_cache.c 	return __lc_get(lc, enr, 0);
lc                534 lib/lru_cache.c void lc_committed(struct lru_cache *lc)
lc                539 lib/lru_cache.c 	list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
lc                541 lib/lru_cache.c 		++lc->changed;
lc                543 lib/lru_cache.c 		list_move(&e->list, &lc->in_use);
lc                545 lib/lru_cache.c 	lc->pending_changes = 0;
lc                559 lib/lru_cache.c unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
lc                562 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
lc                567 lib/lru_cache.c 		list_move(&e->list, &lc->lru);
lc                568 lib/lru_cache.c 		lc->used--;
lc                569 lib/lru_cache.c 		clear_bit_unlock(__LC_STARVING, &lc->flags);
lc                579 lib/lru_cache.c struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
lc                581 lib/lru_cache.c 	BUG_ON(i >= lc->nr_elements);
lc                582 lib/lru_cache.c 	BUG_ON(lc->lc_element[i] == NULL);
lc                583 lib/lru_cache.c 	BUG_ON(lc->lc_element[i]->lc_index != i);
lc                584 lib/lru_cache.c 	return lc->lc_element[i];
lc                592 lib/lru_cache.c unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
lc                594 lib/lru_cache.c 	PARANOIA_LC_ELEMENT(lc, e);
lc                606 lib/lru_cache.c void lc_set(struct lru_cache *lc, unsigned int enr, int index)
lc                611 lib/lru_cache.c 	if (index < 0 || index >= lc->nr_elements)
lc                614 lib/lru_cache.c 	e = lc_element_by_index(lc, index);
lc                621 lib/lru_cache.c 		lh = &lc->free;
lc                623 lib/lru_cache.c 		hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
lc                624 lib/lru_cache.c 		lh = &lc->lru;
lc                638 lib/lru_cache.c void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
lc                641 lib/lru_cache.c 	unsigned int nr_elements = lc->nr_elements;
lc                647 lib/lru_cache.c 		e = lc_element_by_index(lc, i);
lc                154 lib/xz/xz_dec_lzma2.c 	uint32_t lc;
lc                575 lib/xz/xz_dec_lzma2.c 	uint32_t low = prev_byte >> (8 - s->lzma.lc);
lc                576 lib/xz/xz_dec_lzma2.c 	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
lc                818 lib/xz/xz_dec_lzma2.c 	s->lzma.lc = props;
lc                820 lib/xz/xz_dec_lzma2.c 	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
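
The lib/xz/xz_dec_lzma2.c hits show the LZMA "lc" property (literal context bits): the previous byte's top lc bits and the dictionary position masked by literal_pos_mask select a literal subcoder, and the property decoder rejects streams where lc + lp exceeds 4. A minimal stand-alone sketch of that index computation; the function and variable names are mine, only the formula comes from the lines above.

#include <stdint.h>
#include <stdio.h>

/* The kernel stores literal_pos_mask pre-expanded as (1 << lp) - 1. */
static uint32_t literal_subcoder_index(uint32_t lc, uint32_t lp,
				       uint32_t prev_byte, uint32_t pos)
{
	uint32_t literal_pos_mask = (1u << lp) - 1;
	uint32_t low = prev_byte >> (8 - lc);
	uint32_t high = (pos & literal_pos_mask) << lc;

	return low + high;	/* index into the 1 << (lc + lp) literal coders */
}

int main(void)
{
	/* Common LZMA2 props are lc=3, lp=0; lc + lp must not exceed 4. */
	printf("%u\n", literal_subcoder_index(3, 0, 0xE5, 12345));
	return 0;
}
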
lc                972 lib/zlib_deflate/deftree.c 	unsigned lc     /* match length-MIN_MATCH or unmatched char (if dist==0) */
lc                976 lib/zlib_deflate/deftree.c     s->l_buf[s->last_lit++] = (uch)lc;
lc                979 lib/zlib_deflate/deftree.c         s->dyn_ltree[lc].Freq++;
lc                985 lib/zlib_deflate/deftree.c                (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
lc                988 lib/zlib_deflate/deftree.c         s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
lc               1025 lib/zlib_deflate/deftree.c     int lc;             /* match length or unmatched char (if dist == 0) */
lc               1032 lib/zlib_deflate/deftree.c         lc = s->l_buf[lx++];
lc               1034 lib/zlib_deflate/deftree.c             send_code(s, lc, ltree); /* send a literal byte */
lc               1035 lib/zlib_deflate/deftree.c             Tracecv(isgraph(lc), (stderr," '%c' ", lc));
lc               1038 lib/zlib_deflate/deftree.c             code = length_code[lc];
lc               1042 lib/zlib_deflate/deftree.c                 lc -= base_length[code];
lc               1043 lib/zlib_deflate/deftree.c                 send_bits(s, lc, extra);       /* send the extra length bits */
lc                277 lib/zlib_deflate/defutil.h int  zlib_tr_tally        (deflate_state *s, unsigned dist, unsigned lc);
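
In the zlib_deflate hits, "lc" is either a literal byte (dist == 0) or "match length - MIN_MATCH", and matches are tallied under dyn_ltree[length_code[lc] + LITERALS + 1]. A stand-alone sketch of how that length_code[] table is built and indexed, following the same scheme zlib uses internally; the extra_lbits[] values come from the DEFLATE format, the rest is illustrative.

#include <stdio.h>

#define MIN_MATCH	3
#define MAX_MATCH	258
#define LENGTH_CODES	29
#define LITERALS	256

static const int extra_lbits[LENGTH_CODES] = {
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
};

/* length_code[len - MIN_MATCH] gives the length code, 0..28 */
static unsigned char length_code[MAX_MATCH - MIN_MATCH + 1];

static void build_length_code(void)
{
	int code, n, length = 0;

	for (code = 0; code < LENGTH_CODES - 1; code++)
		for (n = 0; n < (1 << extra_lbits[code]); n++)
			length_code[length++] = (unsigned char)code;
	/* length 258 gets a code of its own (ltree entry 285). */
	length_code[length - 1] = (unsigned char)code;
}

int main(void)
{
	int match_len = 18;			/* example match length */
	int lc = match_len - MIN_MATCH;		/* what the tally code stores */

	build_length_code();
	printf("ltree index for match length %d: %d\n",
	       match_len, length_code[lc] + LITERALS + 1);
	return 0;
}
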
lc                137 net/rds/loop.c 	struct rds_loop_connection *lc;
lc                140 net/rds/loop.c 	lc = kzalloc(sizeof(struct rds_loop_connection), gfp);
lc                141 net/rds/loop.c 	if (!lc)
lc                144 net/rds/loop.c 	INIT_LIST_HEAD(&lc->loop_node);
lc                145 net/rds/loop.c 	lc->conn = conn;
lc                146 net/rds/loop.c 	conn->c_transport_data = lc;
lc                149 net/rds/loop.c 	list_add_tail(&lc->loop_node, &loop_conns);
lc                157 net/rds/loop.c 	struct rds_loop_connection *lc = arg;
lc                160 net/rds/loop.c 	rdsdebug("lc %p\n", lc);
lc                162 net/rds/loop.c 	list_del(&lc->loop_node);
lc                164 net/rds/loop.c 	kfree(lc);
lc                179 net/rds/loop.c 	struct rds_loop_connection *lc, *_lc;
lc                190 net/rds/loop.c 	list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
lc                191 net/rds/loop.c 		WARN_ON(lc->conn->c_passive);
lc                192 net/rds/loop.c 		rds_conn_destroy(lc->conn);
lc                198 net/rds/loop.c 	struct rds_loop_connection *lc, *_lc;
lc                202 net/rds/loop.c 	list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
lc                203 net/rds/loop.c 		struct net *c_net = read_pnet(&lc->conn->c_net);
lc                207 net/rds/loop.c 		list_move_tail(&lc->loop_node, &tmp_list);
lc                211 net/rds/loop.c 	list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
lc                212 net/rds/loop.c 		WARN_ON(lc->conn->c_passive);
lc                213 net/rds/loop.c 		rds_conn_destroy(lc->conn);
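
The net/rds/loop.c hits show a common per-netns teardown pattern: matching connections are moved onto a private list while the spinlock is held, and the potentially sleeping destruction runs only after the lock is dropped. A hedged sketch of that pattern; my_conn, my_conns, my_lock and destroy_conn() are placeholders, not RDS symbols.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>

struct my_conn {
	struct list_head node;
	possible_net_t net;
};

static LIST_HEAD(my_conns);
static DEFINE_SPINLOCK(my_lock);

/* Hypothetical teardown; the real code calls rds_conn_destroy(). */
static void destroy_conn(struct my_conn *c)
{
	kfree(c);
}

static void kill_conns_in_net(struct net *net)
{
	struct my_conn *c, *tmp;
	LIST_HEAD(tmp_list);

	spin_lock_irq(&my_lock);
	list_for_each_entry_safe(c, tmp, &my_conns, node) {
		if (net != read_pnet(&c->net))
			continue;
		list_move_tail(&c->node, &tmp_list);
	}
	spin_unlock_irq(&my_lock);

	/* Lock dropped: destruction may sleep without blocking other CPUs. */
	list_for_each_entry_safe(c, tmp, &tmp_list, node)
		destroy_conn(c);
}
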
lc                677 net/tipc/netlink_compat.c 				struct tipc_link_config *lc)
lc                681 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
lc                683 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
lc                685 net/tipc/netlink_compat.c 		return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
lc                696 net/tipc/netlink_compat.c 	struct tipc_link_config *lc;
lc                698 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
lc                704 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
lc                711 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
lc                723 net/tipc/netlink_compat.c 	struct tipc_link_config *lc;
lc                725 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
lc                731 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
lc                738 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
lc                750 net/tipc/netlink_compat.c 	struct tipc_link_config *lc;
lc                752 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
lc                758 net/tipc/netlink_compat.c 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
lc                765 net/tipc/netlink_compat.c 	__tipc_add_link_prop(skb, msg, lc);
lc                776 net/tipc/netlink_compat.c 	struct tipc_link_config *lc;
lc                781 net/tipc/netlink_compat.c 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
lc                789 net/tipc/netlink_compat.c 	if (!string_is_valid(lc->name, len))
lc                792 net/tipc/netlink_compat.c 	media = tipc_media_find(lc->name);
lc                798 net/tipc/netlink_compat.c 	bearer = tipc_bearer_find(msg->net, lc->name);
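
The net/tipc/netlink_compat.c hits translate the legacy tipc_link_config TLV (a name plus one network-byte-order value) into nested netlink attributes. A simplified, hedged sketch of that packing for the link-tolerance case; the message setup and surrounding compat plumbing are assumed to exist, and the function name is mine.

#include <net/netlink.h>
#include <linux/tipc_config.h>
#include <linux/tipc_netlink.h>

static int pack_link_tolerance(struct sk_buff *skb,
			       struct tipc_link_config *lc)
{
	struct nlattr *prop;

	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
		return -EMSGSIZE;

	prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		return -EMSGSIZE;
	/* lc->value arrives in network byte order from the old TLV. */
	if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
		return -EMSGSIZE;
	nla_nest_end(skb, prop);

	return 0;
}
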
lc               1188 security/selinux/ss/policydb.c 	struct constraint_node *c, *lc;
lc               1194 security/selinux/ss/policydb.c 	lc = NULL;
lc               1200 security/selinux/ss/policydb.c 		if (lc)
lc               1201 security/selinux/ss/policydb.c 			lc->next = c;
lc               1274 security/selinux/ss/policydb.c 		lc = c;
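
In the security/selinux/ss/policydb.c hits, "lc" is simply the last constraint node read, used to append each newly parsed node in order without keeping a tail pointer in the structure. A small stand-alone sketch of that idiom; the node type and read_one() parser are placeholders, not the policydb definitions.

#include <stdlib.h>

struct node {
	struct node *next;
	int value;
};

/* Stand-in for parsing one record from the policy image. */
static struct node *read_one(int value)
{
	struct node *c = calloc(1, sizeof(*c));

	if (c)
		c->value = value;
	return c;
}

static struct node *read_list(int count)
{
	struct node *head = NULL, *c, *lc = NULL;
	int i;

	for (i = 0; i < count; i++) {
		c = read_one(i);
		if (!c)
			return head;	/* sketch: real code unwinds and errors out */
		if (lc)
			lc->next = c;	/* append after the previous node */
		else
			head = c;	/* first node becomes the head */
		lc = c;			/* remember the last node read */
	}
	return head;
}
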
lc                106 tools/perf/util/pmu.c 	char *lc;
lc                112 tools/perf/util/pmu.c 	lc = setlocale(LC_NUMERIC, NULL);
lc                119 tools/perf/util/pmu.c 	lc = strdup(lc);
lc                120 tools/perf/util/pmu.c 	if (!lc) {
lc                136 tools/perf/util/pmu.c 	setlocale(LC_NUMERIC, lc);
lc                137 tools/perf/util/pmu.c 	free(lc);
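
The tools/perf/util/pmu.c hits save, override and restore LC_NUMERIC so that event scale strings always parse with '.' as the decimal point; the string returned by setlocale() must be strdup()ed because a later setlocale() call may overwrite it. A stand-alone illustration of that pattern; parse_scale() and the example value are mine, not the perf function.

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_scale(const char *str, double *out)
{
	char *lc;

	/* setlocale(cat, NULL) only queries; copy the result before changing it. */
	lc = setlocale(LC_NUMERIC, NULL);
	if (!lc)
		return -1;
	lc = strdup(lc);
	if (!lc)
		return -1;

	setlocale(LC_NUMERIC, "C");	/* force '.' as the decimal point */
	*out = strtod(str, NULL);

	setlocale(LC_NUMERIC, lc);	/* restore the caller's locale */
	free(lc);
	return 0;
}

int main(void)
{
	double scale;

	if (!parse_scale("6.103515625e-5", &scale))
		printf("%g\n", scale);
	return 0;
}
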