tc 1155 arch/arm/net/bpf_jit_32.c const s8 *tc;
tc 1186 arch/arm/net/bpf_jit_32.c tc = arm_bpf_get_reg64(tcc, tmp, ctx);
tc 1187 arch/arm/net/bpf_jit_32.c emit(ARM_CMP_I(tc[0], hi), ctx);
tc 1188 arch/arm/net/bpf_jit_32.c _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
tc 1190 arch/arm/net/bpf_jit_32.c emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
tc 1191 arch/arm/net/bpf_jit_32.c emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
tc 480 arch/ia64/include/asm/pal.h tc : 1, /* TLB check */
tc 681 arch/ia64/include/asm/pal.h #define pmci_proc_tlb_check pme_processor.tc
tc 67 arch/ia64/kernel/efi.c prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
tc 73 arch/ia64/kernel/efi.c if (tc) \
tc 74 arch/ia64/kernel/efi.c atc = adjust_arg(tc); \
tc 403 arch/ia64/kernel/mca_drv.c if (psp->tc || psp->cc || psp->rc || psp->uc)
tc 680 arch/ia64/kernel/mca_drv.c if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
tc 367 arch/mips/include/asm/mipsmtregs.h #define settc(tc) \
tc 369 arch/mips/include/asm/mipsmtregs.h write_c0_vpecontrol((read_c0_vpecontrol()&~VPECONTROL_TARGTC) | (tc)); \
tc 98 arch/mips/include/asm/txx9/tx3927.h volatile unsigned long tc; /* +90 */
tc 70 arch/mips/include/asm/vpe.h struct list_head tc;
tc 89 arch/mips/include/asm/vpe.h struct list_head tc; /* The list of TC's with this VPE */
tc 117 arch/mips/include/asm/vpe.h struct tc *get_tc(int index);
tc 119 arch/mips/include/asm/vpe.h struct tc *alloc_tc(int index);
tc 126 arch/mips/include/asm/vpe.h void cleanup_tc(struct tc *tc);
tc 58 arch/mips/kernel/mips-mt.c int tc;
tc 73 arch/mips/kernel/mips-mt.c for (tc = 0; tc < ntc; tc++) {
tc 74 arch/mips/kernel/mips-mt.c settc(tc);
tc 95 arch/mips/kernel/mips-mt.c for (tc = 0; tc < ntc; tc++) {
tc 96 arch/mips/kernel/mips-mt.c settc(tc);
tc 101 arch/mips/kernel/mips-mt.c printk(" TC %d (current TC with VPE EPC above)\n", tc);
tc 106 arch/mips/kernel/mips-mt.c printk(" TC %d\n", tc);
tc 46 arch/mips/kernel/smp-mt.c static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
tc 49 arch/mips/kernel/smp-mt.c if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
tc 53 arch/mips/kernel/smp-mt.c if (tc != 0) {
tc 63 arch/mips/kernel/smp-mt.c set_cpu_possible(tc, true);
tc 64 arch/mips/kernel/smp-mt.c set_cpu_present(tc, true);
tc 65 arch/mips/kernel/smp-mt.c __cpu_number_map[tc] = ++ncpu;
tc 66 arch/mips/kernel/smp-mt.c __cpu_logical_map[ncpu] = tc;
tc 72 arch/mips/kernel/smp-mt.c if (tc != 0)
tc 75 arch/mips/kernel/smp-mt.c cpu_set_vpe_id(&cpu_data[ncpu], tc);
tc 80 arch/mips/kernel/smp-mt.c static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
tc 84 arch/mips/kernel/smp-mt.c if (!tc)
tc 89 arch/mips/kernel/smp-mt.c if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
tc 92 arch/mips/kernel/smp-mt.c write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
tc 95 arch/mips/kernel/smp-mt.c write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
tc 185 arch/mips/kernel/smp-mt.c unsigned int mvpconf0, ntc, tc, ncpu = 0;
tc 211 arch/mips/kernel/smp-mt.c for (tc = 0; tc <= ntc; tc++) {
tc 212 arch/mips/kernel/smp-mt.c settc(tc);
tc 214 arch/mips/kernel/smp-mt.c smvp_tc_init(tc, mvpconf0);
tc 215 arch/mips/kernel/smp-mt.c ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
tc 19 arch/mips/kernel/vpe-cmp.c void cleanup_tc(struct tc *tc)
tc 93 arch/mips/kernel/vpe-cmp.c struct tc *t;
tc 149 arch/mips/kernel/vpe-cmp.c list_add(&t->tc, &v->tc);
tc 31 arch/mips/kernel/vpe-mt.c struct tc *t;
tc 46 arch/mips/kernel/vpe-mt.c if (list_empty(&v->tc)) {
tc 57 arch/mips/kernel/vpe-mt.c t = list_first_entry(&v->tc, struct tc, tc);
tc 147 arch/mips/kernel/vpe-mt.c void cleanup_tc(struct tc *tc)
tc 159 arch/mips/kernel/vpe-mt.c settc(tc->index);
tc 209 arch/mips/kernel/vpe-mt.c struct tc *t;
tc 214 arch/mips/kernel/vpe-mt.c t = list_entry(v->tc.next, struct tc, tc);
tc 230 arch/mips/kernel/vpe-mt.c struct tc *t;
tc 233 arch/mips/kernel/vpe-mt.c t = list_entry(v->tc.next, struct tc, tc);
tc 333 arch/mips/kernel/vpe-mt.c struct tc *t;
tc 334 arch/mips/kernel/vpe-mt.c int tc, err;
tc 389 arch/mips/kernel/vpe-mt.c for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
tc 398 arch/mips/kernel/vpe-mt.c t = alloc_tc(tc);
tc 410 arch/mips/kernel/vpe-mt.c if (tc < hw_tcs) {
tc 411 arch/mips/kernel/vpe-mt.c settc(tc);
tc 413 arch/mips/kernel/vpe-mt.c v = alloc_vpe(tc);
tc 422 arch/mips/kernel/vpe-mt.c list_add(&t->tc, &v->tc);
tc 425 arch/mips/kernel/vpe-mt.c if (tc >= aprp_cpu_index()) {
tc 439 arch/mips/kernel/vpe-mt.c if (tc >= vpelimit) {
tc 451 arch/mips/kernel/vpe-mt.c if (tc >= aprp_cpu_index()) {
tc 454 arch/mips/kernel/vpe-mt.c settc(tc);
tc 74 arch/mips/kernel/vpe.c struct tc *get_tc(int index)
tc 76 arch/mips/kernel/vpe.c struct tc *res, *t;
tc 100 arch/mips/kernel/vpe.c INIT_LIST_HEAD(&v->tc);
tc 113 arch/mips/kernel/vpe.c struct tc *alloc_tc(int index)
tc 115 arch/mips/kernel/vpe.c struct tc *tc;
tc 117 arch/mips/kernel/vpe.c tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
tc 118 arch/mips/kernel/vpe.c if (tc == NULL)
tc 121 arch/mips/kernel/vpe.c INIT_LIST_HEAD(&tc->tc);
tc 122 arch/mips/kernel/vpe.c tc->index = index;
tc 125 arch/mips/kernel/vpe.c list_add_tail(&tc->list, &vpecontrol.tc_list);
tc 129 arch/mips/kernel/vpe.c return tc;
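The arch/mips vpe entries above all lean on one idiom: struct tc embeds a struct list_head member that is itself named tc, which is why list_first_entry(&v->tc, struct tc, tc) mentions tc three times (list head, containing type, member name). A minimal userspace sketch of that intrusive-list pattern, with simplified stand-in types (struct vpe here carries only the list head; the real structs have more fields):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* container_of: recover the enclosing struct from a pointer to its member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct tc {
	int index;
	struct list_head tc;	/* node on the owning VPE's TC list */
};

struct vpe {
	struct list_head tc;	/* head of the list of TCs bound to this VPE */
};

int main(void)
{
	struct vpe v;
	struct tc t = { .index = 1 };

	INIT_LIST_HEAD(&v.tc);
	/* stands in for list_add(&t->tc, &v->tc); head vs tail insertion aside */
	list_add_tail(&t.tc, &v.tc);

	/* the triple-tc expression from vpe-mt.c line 57, decoded */
	struct tc *first = list_first_entry(&v.tc, struct tc, tc);
	printf("first TC index = %d\n", first->index);
	return 0;
}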
tc 189 arch/mips/pci/ops-tx3927.c tx3927_pcicptr->tc = TX3927_PCIC_TC_OF8E | TX3927_PCIC_TC_IF8E;
tc 699 arch/x86/platform/efi/efi_64.c static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
tc 709 arch/x86/platform/efi/efi_64.c phys_tc = virt_to_phys_or_null(tc);
tc 462 drivers/ata/pata_mpc52xx.c unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
tc 467 drivers/ata/pata_mpc52xx.c bd->status = tc;
tc 472 drivers/ata/pata_mpc52xx.c bd->status = tc;
tc 480 drivers/ata/pata_mpc52xx.c cur_addr += tc;
tc 481 drivers/ata/pata_mpc52xx.c cur_len -= tc;
tc 670 drivers/atm/firestream.c void *tc;
tc 681 drivers/atm/firestream.c tc = bus_to_virt (qe->p0);
tc 682 drivers/atm/firestream.c fs_dprintk (FS_DEBUG_ALLOC, "Free tc: %p\n", tc);
tc 683 drivers/atm/firestream.c kfree (tc);
tc 852 drivers/atm/firestream.c struct fs_transmit_config *tc;
tc 935 drivers/atm/firestream.c tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL);
tc 937 drivers/atm/firestream.c tc, sizeof (struct fs_transmit_config));
tc 938 drivers/atm/firestream.c if (!tc) {
tc 953 drivers/atm/firestream.c tc->flags = 0
tc 961 drivers/atm/firestream.c tc->flags = 0
tc 969 drivers/atm/firestream.c tc->flags = 0;
tc 973 drivers/atm/firestream.c tc->atm_hdr = (vpi << 20) | (vci << 4);
tc 1000 drivers/atm/firestream.c kfree(tc);
tc 1007 drivers/atm/firestream.c tc->TMC[0] = tmc0 | 0x4000;
tc 1008 drivers/atm/firestream.c tc->TMC[1] = 0; /* Unused */
tc 1009 drivers/atm/firestream.c tc->TMC[2] = 0; /* Unused */
tc 1010 drivers/atm/firestream.c tc->TMC[3] = 0; /* Unused */
tc 1012 drivers/atm/firestream.c tc->spec = 0; /* UTOPIA address, UDF, HEC: Unused -> 0 */
tc 1013 drivers/atm/firestream.c tc->rtag[0] = 0; /* What should I do with routing tags???
tc 1015 drivers/atm/firestream.c tc->rtag[1] = 0;
tc 1016 drivers/atm/firestream.c tc->rtag[2] = 0;
tc 1020 drivers/atm/firestream.c my_hd (tc, sizeof (*tc));
tc 1036 drivers/atm/firestream.c virt_to_bus (tc), 0, 0);
tc 67 drivers/base/transport_class.c static int anon_transport_dummy_function(struct transport_container *tc,
tc 259 drivers/clocksource/timer-atmel-tcb.c static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
tc 262 drivers/clocksource/timer-atmel-tcb.c struct clk *t2_clk = tc->clk[2];
tc 263 drivers/clocksource/timer-atmel-tcb.c int irq = tc->irq[2];
tc 265 drivers/clocksource/timer-atmel-tcb.c ret = clk_prepare_enable(tc->slow_clk);
tc 272 drivers/clocksource/timer-atmel-tcb.c clk_disable_unprepare(tc->slow_clk);
tc 278 drivers/clocksource/timer-atmel-tcb.c clkevt.regs = tc->regs;
tc 288 drivers/clocksource/timer-atmel-tcb.c clk_disable_unprepare(tc->slow_clk);
tc 299 drivers/clocksource/timer-atmel-tcb.c static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
tc 307 drivers/clocksource/timer-atmel-tcb.c static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
tc 335 drivers/clocksource/timer-atmel-tcb.c static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
tc 359 drivers/clocksource/timer-atmel-tcb.c struct atmel_tc tc;
tc 374 drivers/clocksource/timer-atmel-tcb.c tc.regs = of_iomap(node->parent, 0);
tc 375 drivers/clocksource/timer-atmel-tcb.c if (!tc.regs)
tc 382 drivers/clocksource/timer-atmel-tcb.c tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
tc 383 drivers/clocksource/timer-atmel-tcb.c if (IS_ERR(tc.slow_clk))
tc 384 drivers/clocksource/timer-atmel-tcb.c return PTR_ERR(tc.slow_clk);
tc 386 drivers/clocksource/timer-atmel-tcb.c tc.clk[0] = t0_clk;
tc 387 drivers/clocksource/timer-atmel-tcb.c tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
tc 388 drivers/clocksource/timer-atmel-tcb.c if (IS_ERR(tc.clk[1]))
tc 389 drivers/clocksource/timer-atmel-tcb.c tc.clk[1] = t0_clk;
tc 390 drivers/clocksource/timer-atmel-tcb.c tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
tc 391 drivers/clocksource/timer-atmel-tcb.c if (IS_ERR(tc.clk[2]))
tc 392 drivers/clocksource/timer-atmel-tcb.c tc.clk[2] = t0_clk;
tc 394 drivers/clocksource/timer-atmel-tcb.c tc.irq[2] = of_irq_get(node->parent, 2);
tc 395 drivers/clocksource/timer-atmel-tcb.c if (tc.irq[2] <= 0) {
tc 396 drivers/clocksource/timer-atmel-tcb.c tc.irq[2] = of_irq_get(node->parent, 0);
tc 397 drivers/clocksource/timer-atmel-tcb.c if (tc.irq[2] <= 0)
tc 404 drivers/clocksource/timer-atmel-tcb.c for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
tc 405 drivers/clocksource/timer-atmel-tcb.c writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
tc 440 drivers/clocksource/timer-atmel-tcb.c tcaddr = tc.regs;
tc 446 drivers/clocksource/timer-atmel-tcb.c tcb_setup_single_chan(&tc, best_divisor_idx);
tc 453 drivers/clocksource/timer-atmel-tcb.c ret = clk_prepare_enable(tc.clk[1]);
tc 459 drivers/clocksource/timer-atmel-tcb.c tcb_setup_dual_chan(&tc, best_divisor_idx);
tc 470 drivers/clocksource/timer-atmel-tcb.c ret = setup_clkevents(&tc, clk32k_divisor_idx);
tc 486 drivers/clocksource/timer-atmel-tcb.c clk_disable_unprepare(tc.clk[1]);
tc 2295 drivers/dma/amba-pl08x.c u32 mask = 0, err, tc, i;
tc 2304 drivers/dma/amba-pl08x.c tc = readl(pl08x->base + PL080_TC_STATUS);
tc 2305 drivers/dma/amba-pl08x.c if (tc)
tc 2306 drivers/dma/amba-pl08x.c writel(tc, pl08x->base + PL080_TC_CLEAR);
tc 2308 drivers/dma/amba-pl08x.c if (!err && !tc)
tc 2312 drivers/dma/amba-pl08x.c if ((BIT(i) & err) || (BIT(i) & tc)) {
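The amba-pl08x.c entries above show the standard terminal-count interrupt idiom: read the TC status register, write the same mask back to the clear register to acknowledge it, bail out if neither an error bit nor a TC bit is set, then walk the set bits one channel at a time. A compact sketch of that bit walk, with the register reads replaced by plain variables (the channel count and the pending masks below are illustrative, not real hardware state):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hardware would be: tc = readl(base + PL080_TC_STATUS);
	 *                    writel(tc, base + PL080_TC_CLEAR);   (ack) */
	uint32_t tc = 0x05;	/* pretend channels 0 and 2 hit terminal count */
	uint32_t err = 0x00;	/* no error interrupts pending */

	if (!err && !tc)
		return 0;	/* spurious interrupt: nothing to service */

	for (unsigned int i = 0; i < 8; i++)
		if ((err & (1u << i)) || (tc & (1u << i)))
			printf("servicing channel %u\n", i);
	return 0;
}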
tc 374 drivers/dma/s3c24xx-dma.c u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
tc 376 drivers/dma/s3c24xx-dma.c return tc * txd->width;
tc 113 drivers/dma/tegra210-adma.c unsigned int tc;
tc 373 drivers/dma/tegra210-adma.c tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
tc 527 drivers/dma/tegra210-adma.c residual = desc->ch_regs.tc;
tc 603 drivers/dma/tegra210-adma.c ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
tc 730 drivers/dma/tegra210-adma.c ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
tc 767 drivers/dma/tegra210-adma.c tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
tc 229 drivers/dma/ti/edma.c struct edma_tc *tc;
tc 871 drivers/dma/ti/edma.c if (!echan->tc && echan->edesc->cyclic)
tc 1416 drivers/dma/ti/edma.c if (!echan->tc)
tc 1643 drivers/dma/ti/edma.c if (echan->tc) {
tc 1644 drivers/dma/ti/edma.c eventq_no = echan->tc->id;
tc 1647 drivers/dma/ti/edma.c echan->tc = &ecc->tc_list[ecc->info->default_queue];
tc 1648 drivers/dma/ti/edma.c eventq_no = echan->tc->id;
tc 1707 drivers/dma/ti/edma.c echan->tc = NULL;
tc 2222 drivers/dma/ti/edma.c echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
tc 276 drivers/dma/zx_dma.c u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
tc 282 drivers/dma/zx_dma.c while (tc) {
tc 283 drivers/dma/zx_dma.c i = __ffs(tc);
tc 284 drivers/dma/zx_dma.c tc &= ~BIT(i);
tc 251 drivers/firmware/efi/runtime-wrappers.c static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
tc 257 drivers/firmware/efi/runtime-wrappers.c status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
tc 283 drivers/gpu/drm/bridge/tc358767.c static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
tc 290 drivers/gpu/drm/bridge/tc358767.c return regmap_read_poll_timeout(tc->regmap, addr, val,
tc 295 drivers/gpu/drm/bridge/tc358767.c static int tc_aux_wait_busy(struct tc_data *tc)
tc 297 drivers/gpu/drm/bridge/tc358767.c return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
tc 300 drivers/gpu/drm/bridge/tc358767.c static int tc_aux_write_data(struct tc_data *tc, const void *data,
tc 308 drivers/gpu/drm/bridge/tc358767.c ret = regmap_raw_write(tc->regmap, DP0_AUXWDATA(0), auxwdata, count);
tc 315 drivers/gpu/drm/bridge/tc358767.c static int tc_aux_read_data(struct tc_data *tc, void *data, size_t size)
tc 320 drivers/gpu/drm/bridge/tc358767.c ret = regmap_raw_read(tc->regmap, DP0_AUXRDATA(0), auxrdata, count);
tc 344 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = aux_to_tc(aux);
tc 350 drivers/gpu/drm/bridge/tc358767.c ret = tc_aux_wait_busy(tc);
tc 361 drivers/gpu/drm/bridge/tc358767.c ret = tc_aux_write_data(tc, msg->buffer, size);
tc 371 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_AUXADDR, msg->address);
tc 375 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_AUXCFG0, tc_auxcfg0(msg, size));
tc 379 drivers/gpu/drm/bridge/tc358767.c ret = tc_aux_wait_busy(tc);
tc 383 drivers/gpu/drm/bridge/tc358767.c ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &auxstatus);
tc 403 drivers/gpu/drm/bridge/tc358767.c return tc_aux_read_data(tc, msg->buffer, size);
tc 428 drivers/gpu/drm/bridge/tc358767.c static u32 tc_srcctrl(struct tc_data *tc)
tc 436 drivers/gpu/drm/bridge/tc358767.c if (tc->link.scrambler_dis)
tc 438 drivers/gpu/drm/bridge/tc358767.c if (tc->link.spread)
tc 440 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.num_lanes == 2)
tc 442 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.rate != 162000)
tc 447 drivers/gpu/drm/bridge/tc358767.c static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
tc 451 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, pllctrl, PLLUPDATE | PLLEN);
tc 461 drivers/gpu/drm/bridge/tc358767.c static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
tc 474 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
tc 522 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
tc 527 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
tc 529 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
tc 542 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP | PLLEN);
tc 553 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, PXL_PLLPARAM, pxl_pllparam);
tc 558 drivers/gpu/drm/bridge/tc358767.c return tc_pllupdate(tc, PXL_PLLCTRL);
tc 561 drivers/gpu/drm/bridge/tc358767.c static int tc_pxl_pll_dis(struct tc_data *tc)
tc 564 drivers/gpu/drm/bridge/tc358767.c return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
tc 567 drivers/gpu/drm/bridge/tc358767.c static int tc_stream_clock_calc(struct tc_data *tc)
tc 584 drivers/gpu/drm/bridge/tc358767.c return regmap_write(tc->regmap, DP0_VIDMNGEN1, 32768);
tc 587 drivers/gpu/drm/bridge/tc358767.c static int tc_set_syspllparam(struct tc_data *tc)
tc 592 drivers/gpu/drm/bridge/tc358767.c rate = clk_get_rate(tc->refclk);
tc 607 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
tc 611 drivers/gpu/drm/bridge/tc358767.c return regmap_write(tc->regmap, SYS_PLLPARAM, pllparam);
tc 614 drivers/gpu/drm/bridge/tc358767.c static int tc_aux_link_setup(struct tc_data *tc)
tc 620 drivers/gpu/drm/bridge/tc358767.c ret = tc_set_syspllparam(tc);
tc 624 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP_PHY_CTRL,
tc 632 drivers/gpu/drm/bridge/tc358767.c ret = tc_pllupdate(tc, DP0_PLLCTRL);
tc 636 drivers/gpu/drm/bridge/tc358767.c ret = tc_pllupdate(tc, DP1_PLLCTRL);
tc 640 drivers/gpu/drm/bridge/tc358767.c ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
tc 642 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Timeout waiting for PHY to become ready");
tc 653 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_AUXCFG1, dp0_auxcfg1);
tc 659 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
tc 663 drivers/gpu/drm/bridge/tc358767.c static int tc_get_display_props(struct tc_data *tc)
tc 669 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
tc 672 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
tc 673 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
tc 674 drivers/gpu/drm/bridge/tc358767.c tc->link.base.rate = 270000;
tc 677 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.num_lanes > 2) {
tc 678 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "Falling to 2 lanes\n");
tc 679 drivers/gpu/drm/bridge/tc358767.c tc->link.base.num_lanes = 2;
tc 682 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
tc 685 drivers/gpu/drm/bridge/tc358767.c tc->link.spread = reg & DP_MAX_DOWNSPREAD_0_5;
tc 687 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, &reg);
tc 691 drivers/gpu/drm/bridge/tc358767.c tc->link.scrambler_dis = false;
tc 693 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, &reg);
tc 696 drivers/gpu/drm/bridge/tc358767.c tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
tc 698 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
tc 699 drivers/gpu/drm/bridge/tc358767.c tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
tc 700 drivers/gpu/drm/bridge/tc358767.c (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
tc 701 drivers/gpu/drm/bridge/tc358767.c tc->link.base.num_lanes,
tc 702 drivers/gpu/drm/bridge/tc358767.c (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
tc 704 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc 705 drivers/gpu/drm/bridge/tc358767.c tc->link.spread ? "0.5%" : "0.0%",
tc 706 drivers/gpu/drm/bridge/tc358767.c tc->link.scrambler_dis ? "disabled" : "enabled");
tc 707 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
tc 708 drivers/gpu/drm/bridge/tc358767.c tc->link.assr, tc->assr);
tc 713 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
tc 717 drivers/gpu/drm/bridge/tc358767.c static int tc_set_video_mode(struct tc_data *tc,
tc 742 drivers/gpu/drm/bridge/tc358767.c out_bw = tc->link.base.num_lanes * tc->link.base.rate;
tc 745 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "set mode %dx%d\n",
tc 747 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
tc 749 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
tc 751 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
tc 760 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, VPCTRL0,
tc 766 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, HTIM01,
tc 772 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, HTIM02,
tc 778 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, VTIM01,
tc 784 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, VTIM02,
tc 790 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, VFUEN0, VFUEN); /* update settings */
tc 795 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, TSTCTL,
tc 806 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_VIDSYNCDELAY,
tc 810 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_TOTALVAL,
tc 816 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_STARTVAL,
tc 822 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_ACTIVEVAL,
tc 837 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SYNCVAL, dp0_syncval);
tc 841 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DPIPXLFMT,
tc 848 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_MISC,
tc 858 drivers/gpu/drm/bridge/tc358767.c static int tc_wait_link_training(struct tc_data *tc)
tc 863 drivers/gpu/drm/bridge/tc358767.c ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
tc 866 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
tc 870 drivers/gpu/drm/bridge/tc358767.c ret = regmap_read(tc->regmap, DP0_LTSTAT, &value);
tc 877 drivers/gpu/drm/bridge/tc358767.c static int tc_main_link_enable(struct tc_data *tc)
tc 879 drivers/gpu/drm/bridge/tc358767.c struct drm_dp_aux *aux = &tc->aux;
tc 880 drivers/gpu/drm/bridge/tc358767.c struct device *dev = tc->dev;
tc 886 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "link enable\n");
tc 888 drivers/gpu/drm/bridge/tc358767.c ret = regmap_read(tc->regmap, DP0CTL, &value);
tc 893 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0CTL, 0);
tc 898 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
tc 902 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP1_SRCCTRL,
tc 903 drivers/gpu/drm/bridge/tc358767.c (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
tc 904 drivers/gpu/drm/bridge/tc358767.c ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
tc 908 drivers/gpu/drm/bridge/tc358767.c ret = tc_set_syspllparam(tc);
tc 914 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.num_lanes == 2)
tc 917 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
tc 922 drivers/gpu/drm/bridge/tc358767.c ret = tc_pllupdate(tc, DP0_PLLCTRL);
tc 926 drivers/gpu/drm/bridge/tc358767.c ret = tc_pllupdate(tc, DP1_PLLCTRL);
tc 932 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
tc 935 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
tc 937 drivers/gpu/drm/bridge/tc358767.c ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
tc 944 drivers/gpu/drm/bridge/tc358767.c ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
tc 955 drivers/gpu/drm/bridge/tc358767.c if (tc->assr != tc->link.assr) {
tc 957 drivers/gpu/drm/bridge/tc358767.c tc->assr);
tc 959 drivers/gpu/drm/bridge/tc358767.c tmp[0] = tc->assr;
tc 968 drivers/gpu/drm/bridge/tc358767.c if (tmp[0] != tc->assr) {
tc 970 drivers/gpu/drm/bridge/tc358767.c tc->assr);
tc 972 drivers/gpu/drm/bridge/tc358767.c tc->link.scrambler_dis = true;
tc 977 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_link_configure(aux, &tc->link.base);
tc 982 drivers/gpu/drm/bridge/tc358767.c tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
tc 999 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
tc 1005 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_LTLOOPCTRL,
tc 1012 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc 1013 drivers/gpu/drm/bridge/tc358767.c tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
tc 1020 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0CTL,
tc 1021 drivers/gpu/drm/bridge/tc358767.c ((tc->link.base.capabilities &
tc 1029 drivers/gpu/drm/bridge/tc358767.c ret = tc_wait_link_training(tc);
tc 1034 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Link training phase 1 failed: %s\n",
tc 1042 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
tc 1048 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc 1049 drivers/gpu/drm/bridge/tc358767.c tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
tc 1056 drivers/gpu/drm/bridge/tc358767.c ret = tc_wait_link_training(tc);
tc 1061 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Link training phase 2 failed: %s\n",
tc 1076 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
tc 1083 drivers/gpu/drm/bridge/tc358767.c tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
tc 1098 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Lane 0 failed: %x\n", value);
tc 1102 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.num_lanes == 2) {
tc 1106 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Lane 1 failed: %x\n", value);
tc 1111 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Interlane align failed\n");
tc 1128 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
tc 1131 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
tc 1135 drivers/gpu/drm/bridge/tc358767.c static int tc_main_link_disable(struct tc_data *tc)
tc 1139 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "link disable\n");
tc 1141 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0_SRCCTRL, 0);
tc 1145 drivers/gpu/drm/bridge/tc358767.c return regmap_write(tc->regmap, DP0CTL, 0);
tc 1148 drivers/gpu/drm/bridge/tc358767.c static int tc_stream_enable(struct tc_data *tc)
tc 1153 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "enable video stream\n");
tc 1157 drivers/gpu/drm/bridge/tc358767.c ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
tc 1158 drivers/gpu/drm/bridge/tc358767.c 1000 * tc->mode.clock);
tc 1163 drivers/gpu/drm/bridge/tc358767.c ret = tc_set_video_mode(tc, &tc->mode);
tc 1168 drivers/gpu/drm/bridge/tc358767.c ret = tc_stream_clock_calc(tc);
tc 1173 drivers/gpu/drm/bridge/tc358767.c if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
tc 1175 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0CTL, value);
tc 1187 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, DP0CTL, value);
tc 1196 drivers/gpu/drm/bridge/tc358767.c ret = regmap_write(tc->regmap, SYSCTRL, value);
tc 1203 drivers/gpu/drm/bridge/tc358767.c static int tc_stream_disable(struct tc_data *tc)
tc 1207 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "disable video stream\n");
tc 1209 drivers/gpu/drm/bridge/tc358767.c ret = regmap_update_bits(tc->regmap, DP0CTL, VID_EN, 0);
tc 1213 drivers/gpu/drm/bridge/tc358767.c tc_pxl_pll_dis(tc);
tc 1220 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1222 drivers/gpu/drm/bridge/tc358767.c drm_panel_prepare(tc->panel);
tc 1227 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1230 drivers/gpu/drm/bridge/tc358767.c ret = tc_get_display_props(tc);
tc 1232 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "failed to read display props: %d\n", ret);
tc 1236 drivers/gpu/drm/bridge/tc358767.c ret = tc_main_link_enable(tc);
tc 1238 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "main link enable error: %d\n", ret);
tc 1242 drivers/gpu/drm/bridge/tc358767.c ret = tc_stream_enable(tc);
tc 1244 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "main link stream start error: %d\n", ret);
tc 1245 drivers/gpu/drm/bridge/tc358767.c tc_main_link_disable(tc);
tc 1249 drivers/gpu/drm/bridge/tc358767.c drm_panel_enable(tc->panel);
tc 1254 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1257 drivers/gpu/drm/bridge/tc358767.c drm_panel_disable(tc->panel);
tc 1259 drivers/gpu/drm/bridge/tc358767.c ret = tc_stream_disable(tc);
tc 1261 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "main link stream stop error: %d\n", ret);
tc 1263 drivers/gpu/drm/bridge/tc358767.c ret = tc_main_link_disable(tc);
tc 1265 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "main link disable error: %d\n", ret);
tc 1270 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1272 drivers/gpu/drm/bridge/tc358767.c drm_panel_unprepare(tc->panel);
tc 1290 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1299 drivers/gpu/drm/bridge/tc358767.c avail = tc->link.base.num_lanes * tc->link.base.rate;
tc 1311 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1313 drivers/gpu/drm/bridge/tc358767.c tc->mode = *mode;
tc 1318 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = connector_to_tc(connector);
tc 1323 drivers/gpu/drm/bridge/tc358767.c ret = tc_get_display_props(tc);
tc 1325 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "failed to read display props: %d\n", ret);
tc 1329 drivers/gpu/drm/bridge/tc358767.c count = drm_panel_get_modes(tc->panel);
tc 1333 drivers/gpu/drm/bridge/tc358767.c edid = drm_get_edid(connector, &tc->aux.ddc);
tc 1335 drivers/gpu/drm/bridge/tc358767.c kfree(tc->edid);
tc 1336 drivers/gpu/drm/bridge/tc358767.c tc->edid = edid;
tc 1353 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = connector_to_tc(connector);
tc 1358 drivers/gpu/drm/bridge/tc358767.c if (tc->hpd_pin < 0) {
tc 1359 drivers/gpu/drm/bridge/tc358767.c if (tc->panel)
tc 1365 drivers/gpu/drm/bridge/tc358767.c ret = regmap_read(tc->regmap, GPIOI, &val);
tc 1369 drivers/gpu/drm/bridge/tc358767.c conn = val & BIT(tc->hpd_pin);
tc 1389 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = bridge_to_tc(bridge);
tc 1394 drivers/gpu/drm/bridge/tc358767.c drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
tc 1395 drivers/gpu/drm/bridge/tc358767.c ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
tc 1396 drivers/gpu/drm/bridge/tc358767.c tc->panel ? DRM_MODE_CONNECTOR_eDP :
tc 1402 drivers/gpu/drm/bridge/tc358767.c if (tc->hpd_pin >= 0) {
tc 1403 drivers/gpu/drm/bridge/tc358767.c if (tc->have_irq)
tc 1404 drivers/gpu/drm/bridge/tc358767.c tc->connector.polled = DRM_CONNECTOR_POLL_HPD;
tc 1406 drivers/gpu/drm/bridge/tc358767.c tc->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
tc 1410 drivers/gpu/drm/bridge/tc358767.c if (tc->panel)
tc 1411 drivers/gpu/drm/bridge/tc358767.c drm_panel_attach(tc->panel, &tc->connector);
tc 1413 drivers/gpu/drm/bridge/tc358767.c drm_display_info_set_bus_formats(&tc->connector.display_info,
tc 1415 drivers/gpu/drm/bridge/tc358767.c tc->connector.display_info.bus_flags =
tc 1419 drivers/gpu/drm/bridge/tc358767.c drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
tc 1478 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = arg;
tc 1482 drivers/gpu/drm/bridge/tc358767.c r = regmap_read(tc->regmap, INTSTS_G, &val);
tc 1492 drivers/gpu/drm/bridge/tc358767.c regmap_read(tc->regmap, SYSSTAT, &stat);
tc 1494 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "syserr %x\n", stat);
tc 1497 drivers/gpu/drm/bridge/tc358767.c if (tc->hpd_pin >= 0 && tc->bridge.dev) {
tc 1504 drivers/gpu/drm/bridge/tc358767.c bool h = val & INT_GPIO_H(tc->hpd_pin);
tc 1505 drivers/gpu/drm/bridge/tc358767.c bool lc = val & INT_GPIO_LC(tc->hpd_pin);
tc 1507 drivers/gpu/drm/bridge/tc358767.c dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin,
tc 1511 drivers/gpu/drm/bridge/tc358767.c drm_kms_helper_hotplug_event(tc->bridge.dev);
tc 1514 drivers/gpu/drm/bridge/tc358767.c regmap_write(tc->regmap, INTSTS_G, val);
tc 1522 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc;
tc 1525 drivers/gpu/drm/bridge/tc358767.c tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
tc 1526 drivers/gpu/drm/bridge/tc358767.c if (!tc)
tc 1529 drivers/gpu/drm/bridge/tc358767.c tc->dev = dev;
tc 1532 drivers/gpu/drm/bridge/tc358767.c ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
tc 1537 drivers/gpu/drm/bridge/tc358767.c tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
tc 1538 drivers/gpu/drm/bridge/tc358767.c if (IS_ERR(tc->sd_gpio))
tc 1539 drivers/gpu/drm/bridge/tc358767.c return PTR_ERR(tc->sd_gpio);
tc 1541 drivers/gpu/drm/bridge/tc358767.c if (tc->sd_gpio) {
tc 1542 drivers/gpu/drm/bridge/tc358767.c gpiod_set_value_cansleep(tc->sd_gpio, 0);
tc 1547 drivers/gpu/drm/bridge/tc358767.c tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
tc 1548 drivers/gpu/drm/bridge/tc358767.c if (IS_ERR(tc->reset_gpio))
tc 1549 drivers/gpu/drm/bridge/tc358767.c return PTR_ERR(tc->reset_gpio);
tc 1551 drivers/gpu/drm/bridge/tc358767.c if (tc->reset_gpio) {
tc 1552 drivers/gpu/drm/bridge/tc358767.c gpiod_set_value_cansleep(tc->reset_gpio, 1);
tc 1556 drivers/gpu/drm/bridge/tc358767.c tc->refclk = devm_clk_get(dev, "ref");
tc 1557 drivers/gpu/drm/bridge/tc358767.c if (IS_ERR(tc->refclk)) {
tc 1558 drivers/gpu/drm/bridge/tc358767.c ret = PTR_ERR(tc->refclk);
tc 1563 drivers/gpu/drm/bridge/tc358767.c tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
tc 1564 drivers/gpu/drm/bridge/tc358767.c if (IS_ERR(tc->regmap)) {
tc 1565 drivers/gpu/drm/bridge/tc358767.c ret = PTR_ERR(tc->regmap);
tc 1571 drivers/gpu/drm/bridge/tc358767.c &tc->hpd_pin);
tc 1573 drivers/gpu/drm/bridge/tc358767.c tc->hpd_pin = -ENODEV;
tc 1575 drivers/gpu/drm/bridge/tc358767.c if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
tc 1583 drivers/gpu/drm/bridge/tc358767.c regmap_write(tc->regmap, INTCTL_G, INT_SYSERR);
tc 1588 drivers/gpu/drm/bridge/tc358767.c "tc358767-irq", tc);
tc 1594 drivers/gpu/drm/bridge/tc358767.c tc->have_irq = true;
tc 1597 drivers/gpu/drm/bridge/tc358767.c ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
tc 1599 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "can not read device ID: %d\n", ret);
tc 1603 drivers/gpu/drm/bridge/tc358767.c if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
tc 1604 drivers/gpu/drm/bridge/tc358767.c dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
tc 1608 drivers/gpu/drm/bridge/tc358767.c tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
tc 1610 drivers/gpu/drm/bridge/tc358767.c if (!tc->reset_gpio) {
tc 1617 drivers/gpu/drm/bridge/tc358767.c regmap_update_bits(tc->regmap, SYSRSTENB,
tc 1620 drivers/gpu/drm/bridge/tc358767.c regmap_update_bits(tc->regmap, SYSRSTENB,
tc 1626 drivers/gpu/drm/bridge/tc358767.c if (tc->hpd_pin >= 0) {
tc 1627 drivers/gpu/drm/bridge/tc358767.c u32 lcnt_reg = tc->hpd_pin == 0 ? INT_GP0_LCNT : INT_GP1_LCNT;
tc 1628 drivers/gpu/drm/bridge/tc358767.c u32 h_lc = INT_GPIO_H(tc->hpd_pin) | INT_GPIO_LC(tc->hpd_pin);
tc 1631 drivers/gpu/drm/bridge/tc358767.c regmap_write(tc->regmap, lcnt_reg,
tc 1632 drivers/gpu/drm/bridge/tc358767.c clk_get_rate(tc->refclk) * 2 / 1000);
tc 1634 drivers/gpu/drm/bridge/tc358767.c regmap_write(tc->regmap, GPIOM, BIT(tc->hpd_pin));
tc 1636 drivers/gpu/drm/bridge/tc358767.c if (tc->have_irq) {
tc 1638 drivers/gpu/drm/bridge/tc358767.c regmap_update_bits(tc->regmap, INTCTL_G, h_lc, h_lc);
tc 1642 drivers/gpu/drm/bridge/tc358767.c ret = tc_aux_link_setup(tc);
tc 1647 drivers/gpu/drm/bridge/tc358767.c tc->aux.name = "TC358767 AUX i2c adapter";
tc 1648 drivers/gpu/drm/bridge/tc358767.c tc->aux.dev = tc->dev;
tc 1649 drivers/gpu/drm/bridge/tc358767.c tc->aux.transfer = tc_aux_transfer;
tc 1650 drivers/gpu/drm/bridge/tc358767.c ret = drm_dp_aux_register(&tc->aux);
tc 1654 drivers/gpu/drm/bridge/tc358767.c tc->bridge.funcs = &tc_bridge_funcs;
tc 1655 drivers/gpu/drm/bridge/tc358767.c tc->bridge.of_node = dev->of_node;
tc 1656 drivers/gpu/drm/bridge/tc358767.c drm_bridge_add(&tc->bridge);
tc 1658 drivers/gpu/drm/bridge/tc358767.c i2c_set_clientdata(client, tc);
tc 1665 drivers/gpu/drm/bridge/tc358767.c struct tc_data *tc = i2c_get_clientdata(client);
tc 1667 drivers/gpu/drm/bridge/tc358767.c drm_bridge_remove(&tc->bridge);
tc 1668 drivers/gpu/drm/bridge/tc358767.c drm_dp_aux_unregister(&tc->aux);
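The tc358767.c entries above, the bulk of this listing, funnel nearly every hardware access through regmap, and the driver's tc_poll_timeout() helper is just a thin wrapper over regmap_read_poll_timeout() that spins until (reg & mask) == value or a timeout expires. A self-contained userspace sketch of those polling semantics, with a fake register read standing in for regmap_read() (the 0x100 busy mask below is illustrative, not the real AUX_BUSY value):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* fake register: reports "busy" (bit 0x100 set) for the first three reads */
static uint32_t read_reg(void)
{
	static int calls;
	return (calls++ < 3) ? 0x100 : 0x000;
}

/* Poll until (reg & mask) == value; sleep sleep_us between reads and
 * give up after timeout_us.  Returns 0 on success, -1 on timeout. */
static int poll_timeout(uint32_t mask, uint32_t value,
			unsigned int sleep_us, unsigned int timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_reg() & mask) == value)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		unsigned long elapsed_us =
			(unsigned long)(now.tv_sec - start.tv_sec) * 1000000ul +
			(now.tv_nsec - start.tv_nsec) / 1000;
		if (elapsed_us >= timeout_us)
			return -1;
		if (sleep_us)
			usleep(sleep_us);
	}
}

int main(void)
{
	/* mirrors tc_aux_wait_busy(): wait for the busy bit to clear */
	printf("poll result: %d\n", poll_timeout(0x100, 0, 100, 100000));
	return 0;
}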
tc 10324 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
tc 10432 drivers/gpu/drm/i915/i915_reg.h #define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
tc 10437 drivers/gpu/drm/i915/i915_reg.h #define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
tc 10442 drivers/gpu/drm/i915/i915_reg.h #define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
tc 10480 drivers/gpu/drm/i915/i915_reg.h #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
tc 10943 drivers/gpu/drm/i915/i915_reg.h #define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
tc 10988 drivers/gpu/drm/i915/i915_reg.h #define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
tc 11003 drivers/gpu/drm/i915/i915_reg.h #define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
tc 11016 drivers/gpu/drm/i915/i915_reg.h #define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
tc 11032 drivers/gpu/drm/i915/i915_reg.h #define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
tc 11038 drivers/gpu/drm/i915/i915_reg.h #define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
tc 11049 drivers/gpu/drm/i915/i915_reg.h #define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
tc 11059 drivers/gpu/drm/i915/i915_reg.h #define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
tc 11069 drivers/gpu/drm/i915/i915_reg.h #define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
tc 11081 drivers/gpu/drm/i915/i915_reg.h #define DSI_TA_TO(tc) _MMIO_DSI(tc, \
tc 491 drivers/greybus/svc.c request.tc = 0; /* TC0 */
tc 237 drivers/ide/au1xxx-ide.c unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
tc 253 drivers/ide/au1xxx-ide.c sg_phys(sg), tc, flags)) {
tc 259 drivers/ide/au1xxx-ide.c sg_phys(sg), tc, flags)) {
tc 265 drivers/ide/au1xxx-ide.c cur_addr += tc;
tc 266 drivers/ide/au1xxx-ide.c cur_len -= tc;
tc 1489 drivers/ide/pmac.c unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
tc 1497 drivers/ide/pmac.c table->req_count = cpu_to_le16(tc);
tc 1502 drivers/ide/pmac.c cur_addr += tc;
tc 1503 drivers/ide/pmac.c cur_len -= tc;
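Both IDE hits above (au1xxx-ide.c and pmac.c) cap each DMA descriptor at 0xfe00 bytes and walk a scatter-gather segment in tc-sized chunks. A sketch of that chunking loop with the descriptor fill stubbed out (the address and length values are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK 0xfe00u	/* per-descriptor limit from the listing */

int main(void)
{
	uint32_t cur_addr = 0x100000;	/* hypothetical segment address */
	uint32_t cur_len = 0x25000;	/* hypothetical segment length */

	while (cur_len) {
		uint32_t tc = (cur_len < MAX_CHUNK) ? cur_len : MAX_CHUNK;

		/* driver would fill one DMA descriptor here, e.g.
		 * table->req_count = cpu_to_le16(tc); */
		printf("descriptor: addr=0x%x len=0x%x\n",
		       (unsigned int)cur_addr, (unsigned int)tc);

		cur_addr += tc;
		cur_len -= tc;
	}
	return 0;
}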
tc 667 drivers/infiniband/hw/mthca/mthca_cmd.c int ts = 0, tc = 0;
tc 702 drivers/infiniband/hw/mthca/mthca_cmd.c ++tc;
tc 720 drivers/infiniband/hw/mthca/mthca_cmd.c mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
tc 723 drivers/infiniband/hw/mthca/mthca_cmd.c mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
tc 727 drivers/infiniband/hw/mthca/mthca_cmd.c tc, ts, (unsigned long long) virt - (ts << 10));
tc 2925 drivers/infiniband/hw/qib/qib_iba6120.c u64 ta, tb, tc, td, te;
tc 2928 drivers/infiniband/hw/qib/qib_iba6120.c qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
tc 2932 drivers/infiniband/hw/qib/qib_iba6120.c cs->spkts = tc - cs->spkts;
tc 88 drivers/input/touchscreen/ads7846.c struct ts_event tc;
tc 712 drivers/input/touchscreen/ads7846.c packet->tc.ignore = true;
tc 730 drivers/input/touchscreen/ads7846.c packet->tc.ignore = true;
tc 736 drivers/input/touchscreen/ads7846.c packet->tc.ignore = false;
tc 761 drivers/input/touchscreen/ads7846.c x = *(u16 *)packet->tc.x_buf;
tc 762 drivers/input/touchscreen/ads7846.c y = *(u16 *)packet->tc.y_buf;
tc 766 drivers/input/touchscreen/ads7846.c x = packet->tc.x;
tc 767 drivers/input/touchscreen/ads7846.c y = packet->tc.y;
tc 768 drivers/input/touchscreen/ads7846.c z1 = packet->tc.z1;
tc 769 drivers/input/touchscreen/ads7846.c z2 = packet->tc.z2;
tc 801 drivers/input/touchscreen/ads7846.c if (packet->tc.ignore || Rt > ts->pressure_max) {
tc 803 drivers/input/touchscreen/ads7846.c packet->tc.ignore, Rt);
tc 999 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.y_buf[0];
tc 1010 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.y;
tc 1029 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.y;
tc 1045 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.x_buf[0];
tc 1057 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.x;
tc 1072 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.x;
tc 1091 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.z1;
tc 1105 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.z1;
tc 1122 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.z2;
tc 1136 drivers/input/touchscreen/ads7846.c x->rx_buf = &packet->tc.z2;
tc 46 drivers/input/touchscreen/tps6507x-ts.c struct ts_event tc;
tc 159 drivers/input/touchscreen/tps6507x-ts.c &tsc->tc.pressure);
tc 163 drivers/input/touchscreen/tps6507x-ts.c pendown = tsc->tc.pressure > tsc->min_pressure;
tc 182 drivers/input/touchscreen/tps6507x-ts.c &tsc->tc.x);
tc 187 drivers/input/touchscreen/tps6507x-ts.c &tsc->tc.y);
tc 191 drivers/input/touchscreen/tps6507x-ts.c input_report_abs(input_dev, ABS_X, tsc->tc.x);
tc 192 drivers/input/touchscreen/tps6507x-ts.c input_report_abs(input_dev, ABS_Y, tsc->tc.y);
tc 193 drivers/input/touchscreen/tps6507x-ts.c input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
tc 85 drivers/input/touchscreen/tsc2007.h u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc);
tc 52 drivers/input/touchscreen/tsc2007_core.c static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
tc 55 drivers/input/touchscreen/tsc2007_core.c tc->y = tsc2007_xfer(tsc, READ_Y);
tc 58 drivers/input/touchscreen/tsc2007_core.c tc->x = tsc2007_xfer(tsc, READ_X);
tc 61 drivers/input/touchscreen/tsc2007_core.c tc->z1 = tsc2007_xfer(tsc, READ_Z1);
tc 62 drivers/input/touchscreen/tsc2007_core.c tc->z2 = tsc2007_xfer(tsc, READ_Z2);
tc 68 drivers/input/touchscreen/tsc2007_core.c u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc)
tc 73 drivers/input/touchscreen/tsc2007_core.c if (tc->x == MAX_12BIT)
tc 74 drivers/input/touchscreen/tsc2007_core.c tc->x = 0;
tc 76 drivers/input/touchscreen/tsc2007_core.c if (likely(tc->x && tc->z1)) {
tc 78 drivers/input/touchscreen/tsc2007_core.c rt = tc->z2 - tc->z1;
tc 79 drivers/input/touchscreen/tsc2007_core.c rt *= tc->x;
tc 81 drivers/input/touchscreen/tsc2007_core.c rt /= tc->z1;
tc 114 drivers/input/touchscreen/tsc2007_core.c struct ts_event tc;
tc 122 drivers/input/touchscreen/tsc2007_core.c tsc2007_read_values(ts, &tc);
tc 125 drivers/input/touchscreen/tsc2007_core.c rt = tsc2007_calculate_resistance(ts, &tc);
tc 139 drivers/input/touchscreen/tsc2007_core.c tc.x, tc.y, rt);
tc 144 drivers/input/touchscreen/tsc2007_core.c input_report_abs(input, ABS_X, tc.x);
tc 145 drivers/input/touchscreen/tsc2007_core.c input_report_abs(input, ABS_Y, tc.y);
tc 71 drivers/input/touchscreen/tsc2007_iio.c struct ts_event tc;
tc 73 drivers/input/touchscreen/tsc2007_iio.c tc.x = tsc2007_xfer(tsc, READ_X);
tc 74 drivers/input/touchscreen/tsc2007_iio.c tc.z1 = tsc2007_xfer(tsc, READ_Z1);
tc 75 drivers/input/touchscreen/tsc2007_iio.c tc.z2 = tsc2007_xfer(tsc, READ_Z2);
tc 76 drivers/input/touchscreen/tsc2007_iio.c *val = tsc2007_calculate_resistance(tsc, &tc);
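The tsc2007 entries above compute touch pressure as a plate resistance from the X position and the two cross-plate samples z1/z2. A self-contained sketch of that calculation (the x_plate_ohms factor and the final >> 12 scaling come from the driver but do not appear in the listing, since those lines do not mention tc; the sample values in main are made up):

#include <stdint.h>
#include <stdio.h>

#define MAX_12BIT 0xfff

struct ts_event { uint32_t x, y, z1, z2; };

static uint32_t calculate_resistance(uint32_t x_plate_ohms,
				     struct ts_event *tc)
{
	uint32_t rt = 0;

	if (tc->x == MAX_12BIT)		/* pen-up reads as full scale */
		tc->x = 0;

	if (tc->x && tc->z1) {
		/* Rt = Rx_plate * (x / 4096) * (z2 / z1 - 1) */
		rt = tc->z2 - tc->z1;
		rt *= tc->x;
		rt *= x_plate_ohms;
		rt /= tc->z1;
		rt = (rt + 2047) >> 12;	/* divide by 4096, rounded */
	}
	return rt;
}

int main(void)
{
	struct ts_event tc = { .x = 1800, .z1 = 400, .z2 = 1200 };

	printf("Rt = %u ohms\n", calculate_resistance(180, &tc));
	return 0;
}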
tc 2341 drivers/md/bcache/super.c struct cache_set *c, *tc;
tc 2344 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
tc 2356 drivers/md/bcache/super.c struct cache_set *c, *tc;
tc 2360 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
tc 2500 drivers/md/bcache/super.c struct cache_set *c, *tc;
tc 2512 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
tc 2547 drivers/md/bcache/super.c struct cache_set *c, *tc;
tc 2585 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
tc 42 drivers/md/dm-snap-transient.c struct transient_c *tc = store->context;
tc 45 drivers/md/dm-snap-transient.c if (size < (tc->next_free + store->chunk_size))
tc 48 drivers/md/dm-snap-transient.c e->new_chunk = sector_to_chunk(store, tc->next_free);
tc 49 drivers/md/dm-snap-transient.c tc->next_free += store->chunk_size;
tc 75 drivers/md/dm-snap-transient.c struct transient_c *tc;
tc 77 drivers/md/dm-snap-transient.c tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
tc 78 drivers/md/dm-snap-transient.c if (!tc)
tc 81 drivers/md/dm-snap-transient.c tc->next_free = 0;
tc 82 drivers/md/dm-snap-transient.c store->context = tc;
tc 223 drivers/md/dm-thin.c typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
tc 224 drivers/md/dm-thin.c typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
tc 381 drivers/md/dm-thin.c struct thin_c *tc;
tc 387 drivers/md/dm-thin.c static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
tc 391 drivers/md/dm-thin.c op->tc = tc;
tc 399 drivers/md/dm-thin.c struct thin_c *tc = op->tc;
tc 400 drivers/md/dm-thin.c sector_t s = block_to_sectors(tc->pool, data_b);
tc 401 drivers/md/dm-thin.c sector_t len = block_to_sectors(tc->pool, data_e - data_b);
tc 403 drivers/md/dm-thin.c return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
tc 586 drivers/md/dm-thin.c struct thin_c *tc;
tc 610 drivers/md/dm-thin.c static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
tc 618 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 620 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 625 drivers/md/dm-thin.c static void requeue_deferred_cells(struct thin_c *tc)
tc 627 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 634 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 635 drivers/md/dm-thin.c list_splice_init(&tc->deferred_cells, &cells);
tc 636 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 642 drivers/md/dm-thin.c static void requeue_io(struct thin_c *tc)
tc 649 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 650 drivers/md/dm-thin.c __merge_bio_list(&bios, &tc->deferred_bio_list);
tc 651 drivers/md/dm-thin.c __merge_bio_list(&bios, &tc->retry_on_resume_list);
tc 652 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 655 drivers/md/dm-thin.c requeue_deferred_cells(tc);
tc 660 drivers/md/dm-thin.c struct thin_c *tc;
tc 663 drivers/md/dm-thin.c list_for_each_entry_rcu(tc, &pool->active_thins, list)
tc 664 drivers/md/dm-thin.c error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
tc 680 drivers/md/dm-thin.c static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
tc 682 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 696 drivers/md/dm-thin.c static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
tc 699 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 721 drivers/md/dm-thin.c static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
tc 723 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 726 drivers/md/dm-thin.c bio_set_dev(bio, tc->pool_dev->bdev);
tc 736 drivers/md/dm-thin.c static void remap_to_origin(struct thin_c *tc, struct bio *bio)
tc 738 drivers/md/dm-thin.c bio_set_dev(bio, tc->origin_dev->bdev);
tc 741 drivers/md/dm-thin.c static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
tc 744 drivers/md/dm-thin.c dm_thin_changed_this_transaction(tc->td);
tc 758 drivers/md/dm-thin.c static void issue(struct thin_c *tc, struct bio *bio)
tc 760 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 763 drivers/md/dm-thin.c if (!bio_triggers_commit(tc, bio)) {
tc 773 drivers/md/dm-thin.c if (dm_thin_aborted_changes(tc->td)) {
tc 787 drivers/md/dm-thin.c static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
tc 789 drivers/md/dm-thin.c remap_to_origin(tc, bio);
tc 790 drivers/md/dm-thin.c issue(tc, bio);
tc 793 drivers/md/dm-thin.c static void remap_and_issue(struct thin_c *tc, struct bio *bio,
tc 796 drivers/md/dm-thin.c remap(tc, bio, block);
tc 797 drivers/md/dm-thin.c issue(tc, bio);
tc 819 drivers/md/dm-thin.c struct thin_c *tc;
tc 836 drivers/md/dm-thin.c struct pool *pool = m->tc->pool;
tc 847 drivers/md/dm-thin.c struct pool *pool = m->tc->pool;
tc 887 drivers/md/dm-thin.c static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 889 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 892 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 893 drivers/md/dm-thin.c cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
tc 894 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 899 drivers/md/dm-thin.c static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
tc 902 drivers/md/dm-thin.c struct thin_c *tc;
tc 917 drivers/md/dm-thin.c inc_all_io_entry(info->tc->pool, bio);
tc 929 drivers/md/dm-thin.c static void inc_remap_and_issue_cell(struct thin_c *tc,
tc 936 drivers/md/dm-thin.c info.tc = tc;
tc 945 drivers/md/dm-thin.c cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
tc 949 drivers/md/dm-thin.c thin_defer_bio(tc, bio);
tc 952 drivers/md/dm-thin.c remap_and_issue(info.tc, bio, block);
tc 957 drivers/md/dm-thin.c cell_error(m->tc->pool, m->cell);
tc 959 drivers/md/dm-thin.c mempool_free(m, &m->tc->pool->mapping_pool);
tc 962 drivers/md/dm-thin.c static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
tc 964 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 971 drivers/md/dm-thin.c if (!bio_triggers_commit(tc, bio)) {
tc 981 drivers/md/dm-thin.c if (dm_thin_aborted_changes(tc->td)) {
tc 997 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 998 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1012 drivers/md/dm-thin.c r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
tc 1026 drivers/md/dm-thin.c inc_remap_and_issue_cell(tc, m->cell, m->data_block);
tc 1027 drivers/md/dm-thin.c complete_overwrite_bio(tc, bio);
tc 1029 drivers/md/dm-thin.c inc_all_io_entry(tc->pool, m->cell->holder);
tc 1030 drivers/md/dm-thin.c remap_and_issue(tc, m->cell->holder, m->data_block);
tc 1031 drivers/md/dm-thin.c inc_remap_and_issue_cell(tc, m->cell, m->data_block);
tc 1043 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 1045 drivers/md/dm-thin.c cell_defer_no_holder(tc, m->cell);
tc 1046 drivers/md/dm-thin.c mempool_free(m, &tc->pool->mapping_pool);
tc 1064 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 1066 drivers/md/dm-thin.c r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
tc 1068 drivers/md/dm-thin.c metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
tc 1073 drivers/md/dm-thin.c cell_defer_no_holder(tc, m->cell);
tc 1074 drivers/md/dm-thin.c mempool_free(m, &tc->pool->mapping_pool);
tc 1088 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 1089 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1093 drivers/md/dm-thin.c begin_discard(&op, tc, discard_parent);
tc 1131 drivers/md/dm-thin.c struct pool *pool = m->tc->pool;
tc 1152 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 1153 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1162 drivers/md/dm-thin.c r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
tc 1166 drivers/md/dm-thin.c cell_defer_no_holder(tc, m->cell);
tc 1179 drivers/md/dm-thin.c cell_defer_no_holder(tc, m->cell);
tc 1187 drivers/md/dm-thin.c dm_device_name(tc->pool->pool_md));
tc 1199 drivers/md/dm-thin.c begin_discard(&op, tc, discard_parent);
tc 1209 drivers/md/dm-thin.c struct thin_c *tc = m->tc;
tc 1210 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1224 drivers/md/dm-thin.c cell_defer_no_holder(tc, m->cell);
tc 1291 drivers/md/dm-thin.c static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
tc 1296 drivers/md/dm-thin.c to.bdev = tc->pool_dev->bdev;
tc 1300 drivers/md/dm-thin.c dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
tc 1303 drivers/md/dm-thin.c static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
tc 1307 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1314 drivers/md/dm-thin.c remap_and_issue(tc, bio, data_begin);
tc 1320 drivers/md/dm-thin.c static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
tc 1326 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1329 drivers/md/dm-thin.c m->tc = tc;
tc 1352 drivers/md/dm-thin.c remap_and_issue_overwrite(tc, bio, data_dest, m);
tc 1360 drivers/md/dm-thin.c to.bdev = tc->pool_dev->bdev;
tc 1372 drivers/md/dm-thin.c ll_zero(tc, m,
tc 1381 drivers/md/dm-thin.c static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
tc 1385 drivers/md/dm-thin.c schedule_copy(tc, virt_block, tc->pool_dev,
tc 1387 drivers/md/dm-thin.c tc->pool->sectors_per_block);
tc 1390 drivers/md/dm-thin.c static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
tc 1394 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1398 drivers/md/dm-thin.c m->tc = tc;
tc 1411 drivers/md/dm-thin.c remap_and_issue_overwrite(tc, bio, data_block, m);
tc 1413 drivers/md/dm-thin.c ll_zero(tc, m, data_block * pool->sectors_per_block,
tc 1419 drivers/md/dm-thin.c static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
tc 1423 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1427 drivers/md/dm-thin.c if (virt_block_end <= tc->origin_size)
tc 1428 drivers/md/dm-thin.c schedule_copy(tc, virt_block, tc->origin_dev,
tc 1432 drivers/md/dm-thin.c else if (virt_block_begin < tc->origin_size)
tc 1433 drivers/md/dm-thin.c schedule_copy(tc, virt_block, tc->origin_dev,
tc 1435 drivers/md/dm-thin.c tc->origin_size - virt_block_begin);
tc 1438 drivers/md/dm-thin.c schedule_zero(tc, virt_block, data_dest, cell, bio);
tc 1527 drivers/md/dm-thin.c static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
tc 1531 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1597 drivers/md/dm-thin.c struct thin_c *tc = h->tc;
tc 1600 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 1601 drivers/md/dm-thin.c bio_list_add(&tc->retry_on_resume_list, bio);
tc 1602 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 1659 drivers/md/dm-thin.c static void process_discard_cell_no_passdown(struct thin_c *tc,
tc 1662 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1669 drivers/md/dm-thin.c m->tc = tc;
tc 1679 drivers/md/dm-thin.c static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
tc 1682 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1697 drivers/md/dm-thin.c r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
tc 1706 drivers/md/dm-thin.c build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
tc 1707 drivers/md/dm-thin.c if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
tc 1718 drivers/md/dm-thin.c m->tc = tc;
tc 1742 drivers/md/dm-thin.c static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
tc 1753 drivers/md/dm-thin.c break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
tc 1763 drivers/md/dm-thin.c static void process_discard_bio(struct thin_c *tc, struct bio *bio)
tc 1769 drivers/md/dm-thin.c get_bio_block_range(tc, bio, &begin, &end);
tc 1778 drivers/md/dm-thin.c build_key(tc->td, VIRTUAL, begin, end, &virt_key);
tc 1779 drivers/md/dm-thin.c if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
tc 1789 drivers/md/dm-thin.c tc->pool->process_discard_cell(tc, virt_cell);
tc 1792 drivers/md/dm-thin.c static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
tc 1799 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1801 drivers/md/dm-thin.c r = alloc_data_block(tc, &data_block);
tc 1804 drivers/md/dm-thin.c schedule_internal_copy(tc, block, lookup_result->block,
tc 1833 drivers/md/dm-thin.c h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
tc 1834 drivers/md/dm-thin.c inc_all_io_entry(info->tc->pool, bio);
tc 1840 drivers/md/dm-thin.c static void remap_and_issue_shared_cell(struct thin_c *tc,
tc 1847 drivers/md/dm-thin.c info.tc = tc;
tc 1851 drivers/md/dm-thin.c cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
tc 1855 drivers/md/dm-thin.c thin_defer_bio(tc, bio);
tc 1858 drivers/md/dm-thin.c remap_and_issue(tc, bio, block);
tc 1861 drivers/md/dm-thin.c static void process_shared_bio(struct thin_c *tc, struct bio *bio,
tc 1867 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1874 drivers/md/dm-thin.c build_data_key(tc->td, lookup_result->block, &key);
tc 1876 drivers/md/dm-thin.c cell_defer_no_holder(tc, virt_cell);
tc 1881 drivers/md/dm-thin.c break_sharing(tc, bio, block, &key, lookup_result, data_cell);
tc 1882 drivers/md/dm-thin.c cell_defer_no_holder(tc, virt_cell);
tc 1888 drivers/md/dm-thin.c remap_and_issue(tc, bio, lookup_result->block);
tc 1890 drivers/md/dm-thin.c remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
tc 1891 drivers/md/dm-thin.c remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
tc 1895 drivers/md/dm-thin.c static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
tc 1900 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1907 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 1909 drivers/md/dm-thin.c remap_and_issue(tc, bio, 0);
tc 1918 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 1923 drivers/md/dm-thin.c r = alloc_data_block(tc, &data_block);
tc 1926 drivers/md/dm-thin.c if (tc->origin_dev)
tc 1927 drivers/md/dm-thin.c schedule_external_copy(tc, block, data_block, cell, bio);
tc 1929 drivers/md/dm-thin.c schedule_zero(tc, block, data_block, cell, bio);
tc 1944 drivers/md/dm-thin.c static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 1947 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 1949 drivers/md/dm-thin.c dm_block_t block = get_bio_block(tc, bio);
tc 1952 drivers/md/dm-thin.c if (tc->requeue_mode) {
tc 1957 drivers/md/dm-thin.c r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
tc 1961 drivers/md/dm-thin.c process_shared_bio(tc, bio, block, &lookup_result, cell);
tc 1964 drivers/md/dm-thin.c remap_and_issue(tc, bio, lookup_result.block);
tc 1965 drivers/md/dm-thin.c inc_remap_and_issue_cell(tc, cell, lookup_result.block);
tc 1970 drivers/md/dm-thin.c if (bio_data_dir(bio) == READ && tc->origin_dev) {
tc 1972 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 1974 drivers/md/dm-thin.c if (bio_end_sector(bio) <= tc->origin_size)
tc 1975 drivers/md/dm-thin.c remap_to_origin_and_issue(tc, bio);
tc 1977 drivers/md/dm-thin.c else if (bio->bi_iter.bi_sector < tc->origin_size) {
tc 1979 drivers/md/dm-thin.c bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
tc 1980 drivers/md/dm-thin.c remap_to_origin_and_issue(tc, bio);
tc 1987 drivers/md/dm-thin.c provision_block(tc, bio, block, cell);
tc 1993 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 1999 drivers/md/dm-thin.c static void process_bio(struct thin_c *tc, struct bio *bio)
tc 2001 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2002 drivers/md/dm-thin.c dm_block_t block = get_bio_block(tc, bio);
tc 2010 drivers/md/dm-thin.c build_virtual_key(tc->td, block, &key);
tc 2014 drivers/md/dm-thin.c process_cell(tc, cell);
tc 2017 drivers/md/dm-thin.c static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
tc 2022 drivers/md/dm-thin.c dm_block_t block = get_bio_block(tc, bio);
tc 2025 drivers/md/dm-thin.c r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
tc 2029 drivers/md/dm-thin.c handle_unserviceable_bio(tc->pool, bio);
tc 2031 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 2033 drivers/md/dm-thin.c inc_all_io_entry(tc->pool, bio);
tc 2034 drivers/md/dm-thin.c remap_and_issue(tc, bio, lookup_result.block);
tc 2036 drivers/md/dm-thin.c inc_remap_and_issue_cell(tc, cell, lookup_result.block);
tc 2042 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 2044 drivers/md/dm-thin.c handle_unserviceable_bio(tc->pool, bio);
tc 2048 drivers/md/dm-thin.c if (tc->origin_dev) {
tc 2049 drivers/md/dm-thin.c inc_all_io_entry(tc->pool, bio);
tc 2050 drivers/md/dm-thin.c remap_to_origin_and_issue(tc, bio);
tc 2062 drivers/md/dm-thin.c cell_defer_no_holder(tc, cell);
tc 2068 drivers/md/dm-thin.c static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
tc 2070 drivers/md/dm-thin.c __process_bio_read_only(tc, bio, NULL);
tc 2073 drivers/md/dm-thin.c static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 2075 drivers/md/dm-thin.c __process_bio_read_only(tc, cell->holder, cell);
tc 2078 drivers/md/dm-thin.c static void process_bio_success(struct thin_c *tc, struct bio *bio)
tc 2083 drivers/md/dm-thin.c static void process_bio_fail(struct thin_c *tc, struct bio *bio)
tc 2088 drivers/md/dm-thin.c static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 2090 drivers/md/dm-thin.c cell_success(tc->pool, cell);
tc 2093 drivers/md/dm-thin.c static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 2095 drivers/md/dm-thin.c cell_error(tc->pool, cell);
tc 2111 drivers/md/dm-thin.c static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
tc 2117 drivers/md/dm-thin.c rbp = &tc->sort_bio_list.rb_node;
tc 2131 drivers/md/dm-thin.c rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
tc 2134 drivers/md/dm-thin.c static void __extract_sorted_bios(struct thin_c *tc)
tc 2140 drivers/md/dm-thin.c for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
tc 2144 drivers/md/dm-thin.c bio_list_add(&tc->deferred_bio_list, bio);
tc 2145 drivers/md/dm-thin.c rb_erase(&pbd->rb_node, &tc->sort_bio_list);
tc 2148 drivers/md/dm-thin.c WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
tc 2151 drivers/md/dm-thin.c static void __sort_thin_deferred_bios(struct thin_c *tc)
tc 2157 drivers/md/dm-thin.c bio_list_merge(&bios, &tc->deferred_bio_list);
tc 2158 drivers/md/dm-thin.c bio_list_init(&tc->deferred_bio_list);
tc 2162 drivers/md/dm-thin.c __thin_bio_rb_add(tc, bio);
tc 2169 drivers/md/dm-thin.c __extract_sorted_bios(tc);
tc 2172 drivers/md/dm-thin.c static void process_thin_deferred_bios(struct thin_c *tc)
tc 2174 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2181 drivers/md/dm-thin.c if (tc->requeue_mode) {
tc 2182 drivers/md/dm-thin.c error_thin_bio_list(tc, &tc->deferred_bio_list,
tc 2189 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2191 drivers/md/dm-thin.c if (bio_list_empty(&tc->deferred_bio_list)) {
tc 2192 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2196 drivers/md/dm-thin.c __sort_thin_deferred_bios(tc);
tc 2198 drivers/md/dm-thin.c bio_list_merge(&bios, &tc->deferred_bio_list);
tc 2199 drivers/md/dm-thin.c bio_list_init(&tc->deferred_bio_list);
tc 2201 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2211 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2212 drivers/md/dm-thin.c bio_list_add(&tc->deferred_bio_list, bio);
tc 2213 drivers/md/dm-thin.c bio_list_merge(&tc->deferred_bio_list, &bios);
tc 2214 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2219 drivers/md/dm-thin.c pool->process_discard(tc, bio);
tc 2221 drivers/md/dm-thin.c pool->process_bio(tc, bio);
tc 2266 drivers/md/dm-thin.c static void process_thin_deferred_cells(struct thin_c *tc)
tc 2268 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2276 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2277 drivers/md/dm-thin.c list_splice_init(&tc->deferred_cells, &cells);
tc 2278 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2284 drivers/md/dm-thin.c count = sort_cells(tc->pool, &cells);
tc 2299 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2300 drivers/md/dm-thin.c list_splice(&cells, &tc->deferred_cells);
tc 2301 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2306 drivers/md/dm-thin.c pool->process_discard_cell(tc, cell);
tc 2308 drivers/md/dm-thin.c pool->process_cell(tc, cell);
tc 2313 drivers/md/dm-thin.c static void thin_get(struct thin_c *tc);
tc 2314 drivers/md/dm-thin.c static void thin_put(struct thin_c *tc);
tc 2323 drivers/md/dm-thin.c struct thin_c *tc = NULL;
tc 2327 drivers/md/dm-thin.c tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
tc 2328 drivers/md/dm-thin.c thin_get(tc);
tc 2332 drivers/md/dm-thin.c return tc;
tc 2335 drivers/md/dm-thin.c static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
tc 2337 drivers/md/dm-thin.c struct thin_c *old_tc = tc;
tc 2340 drivers/md/dm-thin.c list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
tc 2341 drivers/md/dm-thin.c thin_get(tc);
tc 2344 drivers/md/dm-thin.c return tc;
tc 2357 drivers/md/dm-thin.c struct thin_c *tc;
tc 2359 drivers/md/dm-thin.c tc = get_first_thin(pool);
tc 2360 drivers/md/dm-thin.c while (tc) {
tc 2361 drivers/md/dm-thin.c process_thin_deferred_cells(tc);
tc 2362 drivers/md/dm-thin.c process_thin_deferred_bios(tc);
tc 2363 drivers/md/dm-thin.c tc = get_next_thin(pool, tc);
tc 2484 drivers/md/dm-thin.c struct thin_c *tc;
tc 2495 drivers/md/dm-thin.c w->tc->requeue_mode = true;
tc 2496 drivers/md/dm-thin.c requeue_io(w->tc);
tc 2503 drivers/md/dm-thin.c w->tc->requeue_mode = false;
tc 2507 drivers/md/dm-thin.c static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
tc 2511 drivers/md/dm-thin.c w.tc = tc;
tc 2512 drivers/md/dm-thin.c pool_work_wait(&w.pw, tc->pool, fn);
tc 2668 drivers/md/dm-thin.c static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
tc 2671 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2673 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2674 drivers/md/dm-thin.c bio_list_add(&tc->deferred_bio_list, bio);
tc 2675 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2680 drivers/md/dm-thin.c static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
tc 2682 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2685 drivers/md/dm-thin.c thin_defer_bio(tc, bio);
tc 2689 drivers/md/dm-thin.c static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
tc 2692 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 2695 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2696 drivers/md/dm-thin.c list_add_tail(&cell->user_list, &tc->deferred_cells);
tc 2697 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 2703 drivers/md/dm-thin.c static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
tc 2707 drivers/md/dm-thin.c h->tc = tc;
tc 2720 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 2721 drivers/md/dm-thin.c dm_block_t block = get_bio_block(tc, bio);
tc 2722 drivers/md/dm-thin.c struct dm_thin_device *td = tc->td;
tc 2727 drivers/md/dm-thin.c thin_hook_bio(tc, bio);
tc 2729 drivers/md/dm-thin.c if (tc->requeue_mode) {
tc 2735 drivers/md/dm-thin.c if (get_pool_mode(tc->pool) == PM_FAIL) {
tc 2741 drivers/md/dm-thin.c thin_defer_bio_with_throttle(tc, bio);
tc 2749 drivers/md/dm-thin.c build_virtual_key(tc->td, block, &key);
tc 2750 drivers/md/dm-thin.c if (bio_detain(tc->pool, &key, bio, &virt_cell))
tc 2775 drivers/md/dm-thin.c thin_defer_cell(tc, virt_cell);
tc 2779 drivers/md/dm-thin.c build_data_key(tc->td, result.block, &key);
tc 2780 drivers/md/dm-thin.c if (bio_detain(tc->pool, &key, bio, &data_cell)) {
tc 2781 drivers/md/dm-thin.c cell_defer_no_holder(tc, virt_cell);
tc 2785 drivers/md/dm-thin.c inc_all_io_entry(tc->pool, bio);
tc 2786 drivers/md/dm-thin.c cell_defer_no_holder(tc, data_cell);
tc 2787 drivers/md/dm-thin.c cell_defer_no_holder(tc, virt_cell);
tc 2789 drivers/md/dm-thin.c remap(tc, bio, result.block);
tc 2794 drivers/md/dm-thin.c thin_defer_cell(tc, virt_cell);
tc 2804 drivers/md/dm-thin.c cell_defer_no_holder(tc, virt_cell);
tc 2824 drivers/md/dm-thin.c struct thin_c *tc;
tc 2827 drivers/md/dm-thin.c list_for_each_entry_rcu(tc, &pool->active_thins, list) {
tc 2828 drivers/md/dm-thin.c spin_lock_irqsave(&tc->lock, flags);
tc 2829 drivers/md/dm-thin.c bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
tc 2830 drivers/md/dm-thin.c bio_list_init(&tc->retry_on_resume_list);
tc 2831 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->lock, flags);
tc 3613 drivers/md/dm-thin.c struct thin_c *tc;
tc 3616 drivers/md/dm-thin.c tc = get_first_thin(pool);
tc 3617 drivers/md/dm-thin.c while (tc) {
tc 3618 drivers/md/dm-thin.c dm_internal_suspend_noflush(tc->thin_md);
tc 3619 drivers/md/dm-thin.c tc = get_next_thin(pool, tc);
tc 3625 drivers/md/dm-thin.c struct thin_c *tc;
tc 3628 drivers/md/dm-thin.c tc = get_first_thin(pool);
tc 3629 drivers/md/dm-thin.c while (tc) {
tc 3630 drivers/md/dm-thin.c dm_internal_resume(tc->thin_md);
tc 3631 drivers/md/dm-thin.c tc = get_next_thin(pool, tc);
tc 4144 drivers/md/dm-thin.c static void thin_get(struct thin_c *tc)
tc 4146 drivers/md/dm-thin.c refcount_inc(&tc->refcount);
tc 4149 drivers/md/dm-thin.c static void thin_put(struct thin_c *tc)
tc 4151 drivers/md/dm-thin.c if (refcount_dec_and_test(&tc->refcount))
tc 4152 drivers/md/dm-thin.c complete(&tc->can_destroy);
tc 4157 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4160 drivers/md/dm-thin.c spin_lock_irqsave(&tc->pool->lock, flags);
tc 4161 drivers/md/dm-thin.c list_del_rcu(&tc->list);
tc 4162 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->pool->lock, flags);
tc 4165 drivers/md/dm-thin.c thin_put(tc);
tc 4166 drivers/md/dm-thin.c wait_for_completion(&tc->can_destroy);
tc 4170 drivers/md/dm-thin.c __pool_dec(tc->pool);
tc 4171 drivers/md/dm-thin.c dm_pool_close_thin_device(tc->td);
tc 4172 drivers/md/dm-thin.c dm_put_device(ti, tc->pool_dev);
tc 4173 drivers/md/dm-thin.c if (tc->origin_dev)
tc 4174 drivers/md/dm-thin.c dm_put_device(ti, tc->origin_dev);
tc 4175 drivers/md/dm-thin.c kfree(tc);
tc 4195 drivers/md/dm-thin.c struct thin_c *tc;
tc 4208 drivers/md/dm-thin.c tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
tc 4209 drivers/md/dm-thin.c if (!tc) {
tc 4214 drivers/md/dm-thin.c tc->thin_md = dm_table_get_md(ti->table);
tc 4215 drivers/md/dm-thin.c spin_lock_init(&tc->lock);
tc 4216 drivers/md/dm-thin.c INIT_LIST_HEAD(&tc->deferred_cells);
tc 4217 drivers/md/dm-thin.c bio_list_init(&tc->deferred_bio_list);
tc 4218 drivers/md/dm-thin.c bio_list_init(&tc->retry_on_resume_list);
tc 4219 drivers/md/dm-thin.c tc->sort_bio_list = RB_ROOT;
tc 4233 drivers/md/dm-thin.c tc->origin_dev = origin_dev;
tc 4241 drivers/md/dm-thin.c tc->pool_dev = pool_dev;
tc 4243 drivers/md/dm-thin.c if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
tc 4249 drivers/md/dm-thin.c pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
tc 4256 drivers/md/dm-thin.c tc->pool = __pool_table_lookup(pool_md);
tc 4257 drivers/md/dm-thin.c if (!tc->pool) {
tc 4262 drivers/md/dm-thin.c __pool_inc(tc->pool);
tc 4264 drivers/md/dm-thin.c if (get_pool_mode(tc->pool) == PM_FAIL) {
tc 4270 drivers/md/dm-thin.c r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
tc 4276 drivers/md/dm-thin.c r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
tc 4285 drivers/md/dm-thin.c if (tc->pool->pf.discard_enabled) {
tc 4292 drivers/md/dm-thin.c spin_lock_irqsave(&tc->pool->lock, flags);
tc 4293 drivers/md/dm-thin.c if (tc->pool->suspended) {
tc 4294 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->pool->lock, flags);
tc 4300 drivers/md/dm-thin.c refcount_set(&tc->refcount, 1);
tc 4301 drivers/md/dm-thin.c init_completion(&tc->can_destroy);
tc 4302 drivers/md/dm-thin.c list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
tc 4303 drivers/md/dm-thin.c spin_unlock_irqrestore(&tc->pool->lock, flags);
tc 4317 drivers/md/dm-thin.c dm_pool_close_thin_device(tc->td);
tc 4319 drivers/md/dm-thin.c __pool_dec(tc->pool);
tc 4323 drivers/md/dm-thin.c dm_put_device(ti, tc->pool_dev);
tc 4325 drivers/md/dm-thin.c if (tc->origin_dev)
tc 4326 drivers/md/dm-thin.c dm_put_device(ti, tc->origin_dev);
tc 4328 drivers/md/dm-thin.c kfree(tc);
tc 4349 drivers/md/dm-thin.c struct pool *pool = h->tc->pool;
tc 4376 drivers/md/dm-thin.c cell_defer_no_holder(h->tc, h->cell);
tc 4383 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4386 drivers/md/dm-thin.c noflush_work(tc, do_noflush_start);
tc 4391 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4397 drivers/md/dm-thin.c noflush_work(tc, do_noflush_stop);
tc 4402 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4404 drivers/md/dm-thin.c if (tc->origin_dev)
tc 4405 drivers/md/dm-thin.c tc->origin_size = get_dev_size(tc->origin_dev->bdev);
tc 4420 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4422 drivers/md/dm-thin.c if (get_pool_mode(tc->pool) == PM_FAIL) {
tc 4427 drivers/md/dm-thin.c if (!tc->td)
tc 4432 drivers/md/dm-thin.c r = dm_thin_get_mapped_count(tc->td, &mapped);
tc 4438 drivers/md/dm-thin.c r = dm_thin_get_highest_mapped_block(tc->td, &highest);
tc 4444 drivers/md/dm-thin.c DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
tc 4447 drivers/md/dm-thin.c tc->pool->sectors_per_block) - 1);
tc 4454 drivers/md/dm-thin.c format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
tc 4455 drivers/md/dm-thin.c (unsigned long) tc->dev_id);
tc 4456 drivers/md/dm-thin.c if (tc->origin_dev)
tc 4457 drivers/md/dm-thin.c DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
tc 4472 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4473 drivers/md/dm-thin.c struct pool *pool = tc->pool;
tc 4485 drivers/md/dm-thin.c return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
tc 4492 drivers/md/dm-thin.c struct thin_c *tc = ti->private;
tc 4493 drivers/md/dm-thin.c struct pool *pool = tc->pool;
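The dm-thin entries above repeat one pattern worth calling out: thin_defer_bio() (tc 2668..2675) queues a bio on tc->deferred_bio_list under tc->lock, and process_thin_deferred_bios() (tc 2172..2201) splices the whole list out under the same lock so the per-bio work runs without it. A minimal sketch of that producer/consumer shape, assuming only the thin_c fields visible in the listing (lock, deferred_bio_list); the *_sketch helpers are hypothetical, not dm-thin's own functions:

        static void defer_bio_sketch(struct thin_c *tc, struct bio *bio)
        {
                unsigned long flags;

                /* producer: queue under the lock (cf. tc 2673..2675) */
                spin_lock_irqsave(&tc->lock, flags);
                bio_list_add(&tc->deferred_bio_list, bio);
                spin_unlock_irqrestore(&tc->lock, flags);
        }

        static void drain_deferred_sketch(struct thin_c *tc)
        {
                struct bio_list bios;
                struct bio *bio;
                unsigned long flags;

                bio_list_init(&bios);

                /* consumer: splice everything out, then work unlocked
                 * (cf. tc 2189..2201)
                 */
                spin_lock_irqsave(&tc->lock, flags);
                bio_list_merge(&bios, &tc->deferred_bio_list);
                bio_list_init(&tc->deferred_bio_list);
                spin_unlock_irqrestore(&tc->lock, flags);

                while ((bio = bio_list_pop(&bios)))
                        tc->pool->process_bio(tc, bio); /* cf. tc 2221 */
        }

The splice keeps the critical section O(1) however many bios are queued, which is why the same shape also appears for deferred cells (list_splice_init at tc 2277).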
tc 229 drivers/media/platform/qcom/venus/core.h struct v4l2_timecode tc;
tc 490 drivers/media/platform/qcom/venus/helpers.c inst->tss[slot].tc = vbuf->timecode;
tc 510 drivers/media/platform/qcom/venus/helpers.c vbuf->timecode = inst->tss[i].tc;
tc 182 drivers/media/platform/vivid/vivid-vid-cap.c struct v4l2_timecode *tc = &vbuf->timecode;
tc 196 drivers/media/platform/vivid/vivid-vid-cap.c tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
tc 197 drivers/media/platform/vivid/vivid-vid-cap.c tc->flags = 0;
tc 198 drivers/media/platform/vivid/vivid-vid-cap.c tc->frames = seq % fps;
tc 199 drivers/media/platform/vivid/vivid-vid-cap.c tc->seconds = (seq / fps) % 60;
tc 200 drivers/media/platform/vivid/vivid-vid-cap.c tc->minutes = (seq / (60 * fps)) % 60;
tc 201 drivers/media/platform/vivid/vivid-vid-cap.c tc->hours = (seq / (60 * 60 * fps)) % 24;
tc 473 drivers/media/v4l2-core/v4l2-ioctl.c const struct v4l2_timecode *tc = &p->timecode;
tc 502 drivers/media/v4l2-core/v4l2-ioctl.c tc->hours, tc->minutes, tc->seconds,
tc 503 drivers/media/v4l2-core/v4l2-ioctl.c tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
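The vivid entries (tc 196..201) show how struct v4l2_timecode is filled from a bare frame sequence number with modulo arithmetic. A self-contained sketch; the helper name is hypothetical, the field math is taken verbatim from the listing:

        #include <linux/videodev2.h>

        /* hypothetical helper mirroring vivid-vid-cap.c tc 196..201 */
        static void fill_timecode_sketch(struct v4l2_timecode *tc, u32 seq, u32 fps)
        {
                tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
                tc->flags = 0;
                tc->frames = seq % fps;                 /* frame within the second */
                tc->seconds = (seq / fps) % 60;
                tc->minutes = (seq / (60 * fps)) % 60;
                tc->hours = (seq / (60 * 60 * fps)) % 24;
        }

At 30 fps, seq = 108000 comes out as exactly 01:00:00:00.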
tc 46 drivers/misc/atmel_tclib.c struct atmel_tc *tc;
tc 50 drivers/misc/atmel_tclib.c list_for_each_entry(tc, &tc_list, node) {
tc 51 drivers/misc/atmel_tclib.c if (tc->allocated)
tc 54 drivers/misc/atmel_tclib.c if ((tc->pdev->dev.of_node && tc->id == block) ||
tc 55 drivers/misc/atmel_tclib.c (tc->pdev->id == block)) {
tc 56 drivers/misc/atmel_tclib.c pdev = tc->pdev;
tc 57 drivers/misc/atmel_tclib.c tc->allocated = true;
tc 63 drivers/misc/atmel_tclib.c return pdev ? tc : NULL;
tc 74 drivers/misc/atmel_tclib.c void atmel_tc_free(struct atmel_tc *tc)
tc 77 drivers/misc/atmel_tclib.c if (tc->allocated)
tc 78 drivers/misc/atmel_tclib.c tc->allocated = false;
tc 109 drivers/misc/atmel_tclib.c struct atmel_tc *tc;
tc 122 drivers/misc/atmel_tclib.c tc = devm_kzalloc(&pdev->dev, sizeof(struct atmel_tc), GFP_KERNEL);
tc 123 drivers/misc/atmel_tclib.c if (!tc)
tc 126 drivers/misc/atmel_tclib.c tc->pdev = pdev;
tc 132 drivers/misc/atmel_tclib.c tc->slow_clk = devm_clk_get(&pdev->dev, "slow_clk");
tc 133 drivers/misc/atmel_tclib.c if (IS_ERR(tc->slow_clk))
tc 134 drivers/misc/atmel_tclib.c return PTR_ERR(tc->slow_clk);
tc 137 drivers/misc/atmel_tclib.c tc->regs = devm_ioremap_resource(&pdev->dev, r);
tc 138 drivers/misc/atmel_tclib.c if (IS_ERR(tc->regs))
tc 139 drivers/misc/atmel_tclib.c return PTR_ERR(tc->regs);
tc 146 drivers/misc/atmel_tclib.c tc->tcb_config = match->data;
tc 148 drivers/misc/atmel_tclib.c tc->id = of_alias_get_id(tc->pdev->dev.of_node, "tcb");
tc 150 drivers/misc/atmel_tclib.c tc->id = pdev->id;
tc 153 drivers/misc/atmel_tclib.c tc->clk[0] = clk;
tc 154 drivers/misc/atmel_tclib.c tc->clk[1] = devm_clk_get(&pdev->dev, "t1_clk");
tc 155 drivers/misc/atmel_tclib.c if (IS_ERR(tc->clk[1]))
tc 156 drivers/misc/atmel_tclib.c tc->clk[1] = clk;
tc 157 drivers/misc/atmel_tclib.c tc->clk[2] = devm_clk_get(&pdev->dev, "t2_clk");
tc 158 drivers/misc/atmel_tclib.c if (IS_ERR(tc->clk[2]))
tc 159 drivers/misc/atmel_tclib.c tc->clk[2] = clk;
tc 161 drivers/misc/atmel_tclib.c tc->irq[0] = irq;
tc 162 drivers/misc/atmel_tclib.c tc->irq[1] = platform_get_irq(pdev, 1);
tc 163 drivers/misc/atmel_tclib.c if (tc->irq[1] < 0)
tc 164 drivers/misc/atmel_tclib.c tc->irq[1] = irq;
tc 165 drivers/misc/atmel_tclib.c tc->irq[2] = platform_get_irq(pdev, 2);
tc 166 drivers/misc/atmel_tclib.c if (tc->irq[2] < 0)
tc 167 drivers/misc/atmel_tclib.c tc->irq[2] = irq;
tc 170 drivers/misc/atmel_tclib.c writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
tc 173 drivers/misc/atmel_tclib.c list_add_tail(&tc->node, &tc_list);
tc 176 drivers/misc/atmel_tclib.c platform_set_drvdata(pdev, tc);
tc 184 drivers/misc/atmel_tclib.c struct atmel_tc *tc = platform_get_drvdata(pdev);
tc 187 drivers/misc/atmel_tclib.c writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR));
tc 322 drivers/net/dsa/b53/b53_regs.h #define ARLTBL_TC(tc) ((3 & tc) << 11)
tc 1322 drivers/net/ethernet/amd/declance.c MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
tc 465 drivers/net/ethernet/amd/xgbe/xgbe-dev.c unsigned int prio, tc;
tc 473 drivers/net/ethernet/amd/xgbe/xgbe-dev.c tc = pdata->ets->prio_tc[prio];
tc 476 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (pdata->pfc->pfc_en & (1 << tc))
tc 2253 drivers/net/ethernet/amd/xgbe/xgbe-drv.c u8 tc;
tc 2259 drivers/net/ethernet/amd/xgbe/xgbe-drv.c tc = mqprio->num_tc;
tc 2261 drivers/net/ethernet/amd/xgbe/xgbe-drv.c if (tc > pdata->hw_feat.tc_cnt)
tc 2264 drivers/net/ethernet/amd/xgbe/xgbe-drv.c pdata->num_tcs = tc;
tc 238 drivers/net/ethernet/aquantia/atlantic/aq_hw.h int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
tc 578 drivers/net/ethernet/aquantia/atlantic/aq_nic.c unsigned int tc = 0U;
tc 583 drivers/net/ethernet/aquantia/atlantic/aq_nic.c ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
tc 122 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c u32 tc = 0U;
tc 146 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tc 150 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 100U, tc);
tc 154 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 100U, tc);
tc 157 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c tc = 0;
tc 161 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tc 165 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 100U, tc);
tc 169 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 100U, tc);
tc 170 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
tc 104 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
tc 106 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
tc 112 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c u32 tc = 0U;
tc 135 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tc 139 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 100U, tc);
tc 143 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 100U, tc);
tc 146 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c tc = 0;
tc 149 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
tc 153 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 100U, tc);
tc 157 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 100U, tc);
tc 159 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
tc 736 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c u32 user_priority_tc_map, u32 tc)
tc 755 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
tc 756 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c rpf_rpb_rx_tc_upt_msk[tc],
tc 757 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c rpf_rpb_rx_tc_upt_shft[tc],
tc 1414 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c u32 tc)
tc 1416 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
tc 1424 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c u32 tc)
tc 1426 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
tc 1443 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c u32 tc)
tc 1445 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
tc 1453 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c u32 tc)
tc 1455 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
tc 373 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h u32 user_priority_tc_map, u32 tc);
tc 666 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h u32 tc);
tc 671 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h u32 tc);
tc 680 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h u32 tc);
tc 685 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h u32 tc);
tc 2234 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h #define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
tc 2253 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h #define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
tc 2290 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h #define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
tc 2309 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h #define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
tc 10916 drivers/net/ethernet/broadcom/bnxt/bnxt.c int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
tc 10922 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (tc > bp->max_tc) {
tc 10924 drivers/net/ethernet/broadcom/bnxt/bnxt.c tc, bp->max_tc);
tc 10928 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (netdev_get_num_tc(dev) == tc)
tc 10935 drivers/net/ethernet/broadcom/bnxt/bnxt.c sh, tc, bp->tx_nr_rings_xdp);
tc 10943 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (tc) {
tc 10944 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
tc 10945 drivers/net/ethernet/broadcom/bnxt/bnxt.c netdev_set_num_tc(dev, tc);
tc 1999 drivers/net/ethernet/broadcom/bnxt/bnxt.h int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
tc 80 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c int tc;
tc 82 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c tc = bnxt_queue_to_tc(bp, queue_id);
tc 83 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c if (tc >= 0)
tc 84 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c ets->prio_tc[i] = tc;
tc 154 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c int tc;
tc 160 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
tc 161 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c if (tc < 0)
tc 166 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
tc 168 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
tc 169 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c ets->tc_tx_bw[tc] = cos2bw.bw_weight;
tc 214 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c int tc = netdev_get_num_tc(bp->dev);
tc 216 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c if (!tc)
tc 217 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c tc = 1;
tc 218 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
tc 440 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
tc 472 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c *tc = bp->max_tc;
tc 474 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c *tc = max_tc + 1;
tc 268 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c int tx_xdp = 0, rc, tc;
tc 283 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c tc = netdev_get_num_tc(dev);
tc 284 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c if (!tc)
tc 285 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c tc = 1;
tc 287 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c true, tc, tx_xdp);
tc 312 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
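bnxt_setup_mq_tc() (tc 10916..10945) is the typical shape of an mqprio handler: validate the requested TC count against what the hardware advertises, then re-partition the TX rings. A hedged sketch using only core netdev helpers; max_tc and rings_per_tc stand in for the driver fields seen above (bp->max_tc, bp->tx_nr_rings_per_tc):

        static int setup_mq_tc_sketch(struct net_device *dev, u8 tc,
                                      u8 max_tc, int rings_per_tc)
        {
                if (tc > max_tc)
                        return -EINVAL;                 /* cf. tc 10922 */

                if (netdev_get_num_tc(dev) == tc)
                        return 0;                       /* no change, cf. tc 10928 */

                if (tc)
                        netdev_set_num_tc(dev, tc);     /* cf. tc 10945 */
                else
                        netdev_reset_tc(dev);

                /* one block of rings per TC, cf. tc 10944 */
                return netif_set_real_num_tx_queues(dev,
                                                    rings_per_tc * (tc ? tc : 1));
        }

The real driver additionally rechecks ring availability and reserves XDP rings (tc 10935, bnxt_xdp.c tc 312); the sketch skips that.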
tc 333 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h unsigned int tc; /* PCI-E traffic class */
tc 1784 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h unsigned int rxqi, unsigned int rxq, unsigned int tc,
tc 438 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
tc 460 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
tc 475 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c *up_tc_map = (1 << tc);
tc 482 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
tc 487 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
tc 492 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
tc 497 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
tc 501 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
tc 508 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c int fw_tc = 7 - tc;
tc 2592 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c S("Traffic Class", "%d", tc);
tc 2924 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c pfres->tc = FW_PFVF_CMD_TC_G(word);
tc 7588 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int rxqi, unsigned int rxq, unsigned int tc,
tc 7604 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
tc 2364 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c S("Traffic Class", "%d", tc);
tc 250 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h unsigned int tc; /* PCI-E traffic class */
tc 1123 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c vfres->tc = FW_PFVF_CMD_TC_G(word);
tc 307 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h u8 tc;
tc 435 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 467 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 486 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 501 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 527 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 538 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h u8 tc;
tc 1377 drivers/net/ethernet/freescale/dpaa2/dpni.c u8 tc,
tc 1391 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = tc;
tc 1425 drivers/net/ethernet/freescale/dpaa2/dpni.c u8 tc,
tc 1441 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = tc;
tc 1529 drivers/net/ethernet/freescale/dpaa2/dpni.c u8 tc,
tc 1543 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = tc;
tc 1573 drivers/net/ethernet/freescale/dpaa2/dpni.c u8 tc,
tc 1589 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = tc;
tc 1672 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = cfg->tc;
tc 1705 drivers/net/ethernet/freescale/dpaa2/dpni.c cmd_params->tc = cfg->tc;
tc 704 drivers/net/ethernet/freescale/dpaa2/dpni.h u8 tc;
tc 822 drivers/net/ethernet/freescale/dpaa2/dpni.h u8 tc,
tc 831 drivers/net/ethernet/freescale/dpaa2/dpni.h u8 tc,
tc 881 drivers/net/ethernet/freescale/dpaa2/dpni.h u8 tc,
tc 890 drivers/net/ethernet/freescale/dpaa2/dpni.h u8 tc,
tc 563 drivers/net/ethernet/freescale/fec.h struct timecounter tc;
tc 1234 drivers/net/ethernet/freescale/fec_main.c ns = timecounter_cyc2time(&fep->tc, ts);
tc 136 drivers/net/ethernet/freescale/fec_ptp.c timecounter_read(&fep->tc);
tc 150 drivers/net/ethernet/freescale/fec_ptp.c ns = timecounter_cyc2time(&fep->tc, tempval);
tc 272 drivers/net/ethernet/freescale/fec_ptp.c timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
tc 343 drivers/net/ethernet/freescale/fec_ptp.c timecounter_read(&fep->tc);
tc 364 drivers/net/ethernet/freescale/fec_ptp.c timecounter_adjtime(&fep->tc, delta);
tc 386 drivers/net/ethernet/freescale/fec_ptp.c ns = timecounter_read(&adapter->tc);
tc 427 drivers/net/ethernet/freescale/fec_ptp.c timecounter_init(&fep->tc, &fep->cc, ns);
tc 524 drivers/net/ethernet/freescale/fec_ptp.c ns = timecounter_read(&fep->tc);
tc 218 drivers/net/ethernet/hisilicon/hns3/hnae3.h int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
tc 562 drivers/net/ethernet/hisilicon/hns3/hnae3.h u8 tc; /* TC index */
tc 281 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c kinfo->tc_info[i].tc,
tc 1578 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c u8 tc = mqprio_qopt->qopt.num_tc;
tc 1584 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
tc 1587 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (tc > HNAE3_MAX_TC)
tc 1596 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
tc 1599 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
tc 1687 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int ring_en, tc;
tc 1763 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
tc 1771 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
tc 3710 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c tc_info->tc);
tc 4022 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
tc 4027 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (tc > HNAE3_MAX_TC)
tc 107 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c u8 *tc, bool *changed)
tc 150 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c *tc = max_tc + 1;
tc 151 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c if (*tc != hdev->tm_info.num_tc)
tc 400 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
tc 409 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
tc 417 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c hclge_tm_schd_info_update(hdev, tc);
tc 430 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c if (tc > 1)
tc 558 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c struct hclge_tqp_tx_queue_tc_cmd *tc;
tc 589 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
tc 591 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c tc->queue_id = cpu_to_le16(queue_id);
tc 595 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c tc_id = tc->tc_id & 0x7;
tc 2147 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c struct hclge_tc_thrd *tc;
tc 2163 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
tc 2166 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
tc 2170 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
tc 229 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c u8 tc;
tc 231 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c tc = hdev->tm_info.prio_tc[pri_id];
tc 233 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c if (tc >= hdev->tm_info.num_tc)
tc 245 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
tc 496 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
tc 507 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c bp_to_qs_map_cmd->tc_id = tc;
tc 563 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c kinfo->tc_info[i].tc = i;
tc 569 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c kinfo->tc_info[i].tc = 0;
tc 1251 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
tc 1261 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c u16 qs_id = vport->qs_offset + tc;
tc 1272 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
tc 325 drivers/net/ethernet/intel/e1000e/e1000.h struct timecounter tc;
tc 502 drivers/net/ethernet/intel/e1000e/netdev.c ns = timecounter_cyc2time(&adapter->tc, systim);
tc 3928 drivers/net/ethernet/intel/e1000e/netdev.c timecounter_init(&adapter->tc, &adapter->cc,
tc 85 drivers/net/ethernet/intel/e1000e/ptp.c timecounter_adjtime(&adapter->tc, delta);
tc 133 drivers/net/ethernet/intel/e1000e/ptp.c *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles));
tc 186 drivers/net/ethernet/intel/e1000e/ptp.c ns = timecounter_cyc2time(&adapter->tc, cycles);
tc 215 drivers/net/ethernet/intel/e1000e/ptp.c timecounter_init(&adapter->tc, &adapter->cc, ns);
tc 246 drivers/net/ethernet/intel/e1000e/ptp.c ns = timecounter_read(&adapter->tc);
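fec, e1000e and igb all share the timekeeping scaffolding visible above: a driver-supplied struct cyclecounter (->cc) is wrapped by a struct timecounter (->tc) that converts raw hardware cycles into nanoseconds. A minimal sketch of the pattern; the adapter layout is illustrative, but the timecounter calls (linux/timecounter.h) are exactly the ones the entries make:

        #include <linux/timecounter.h>
        #include <linux/ktime.h>

        struct adapter_sketch {                 /* illustrative, not any one driver's */
                struct cyclecounter cc;         /* ->read() returns the raw counter */
                struct timecounter tc;
        };

        static void ptp_init_sketch(struct adapter_sketch *ad)
        {
                /* seed with wall-clock time, cf. fec_ptp.c tc 272 */
                timecounter_init(&ad->tc, &ad->cc,
                                 ktime_to_ns(ktime_get_real()));
        }

        static u64 hw_stamp_to_ns_sketch(struct adapter_sketch *ad, u64 raw)
        {
                /* raw cycles -> nanoseconds, cf. e1000e netdev.c tc 502 */
                return timecounter_cyc2time(&ad->tc, raw);
        }

Frequency and offset adjustments then go through timecounter_adjtime() and a fresh timecounter_init(), as the ptp.c entries for all three drivers show.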
tc 516 drivers/net/ethernet/intel/fm10k/fm10k.h int fm10k_setup_tc(struct net_device *dev, u8 tc);
tc 1376 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c int fm10k_setup_tc(struct net_device *dev, u8 tc)
tc 1382 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (tc && (interface->hw.mac.type != fm10k_mac_pf))
tc 1386 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c if (tc > 8)
tc 1402 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c netdev_set_num_tc(dev, tc);
tc 53 drivers/net/ethernet/intel/i40e/i40e_client.c u8 tc = dcb_cfg->etscfg.prioritytable[i];
tc 57 drivers/net/ethernet/intel/i40e/i40e_client.c if (!(vsi->tc_config.enabled_tc & BIT(tc)))
tc 58 drivers/net/ethernet/intel/i40e/i40e_client.c tc = 0;
tc 60 drivers/net/ethernet/intel/i40e/i40e_client.c qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
tc 61 drivers/net/ethernet/intel/i40e/i40e_client.c params->qos.prio_qos[i].tc = tc;
tc 65 drivers/net/ethernet/intel/i40e/i40e_client.c tc, vsi->id);
tc 66 drivers/net/ethernet/intel/i40e/i40e_client.h u8 tc; /* TC mapped to prio */
tc 595 drivers/net/ethernet/intel/i40e/i40e_dcb.c u8 i, tc, err;
tc 604 drivers/net/ethernet/intel/i40e/i40e_dcb.c tc = (u8)((cee_cfg->oper_prio_tc[i] &
tc 607 drivers/net/ethernet/intel/i40e/i40e_dcb.c dcbcfg->etscfg.prioritytable[i * 2] = tc;
tc 608 drivers/net/ethernet/intel/i40e/i40e_dcb.c tc = (u8)((cee_cfg->oper_prio_tc[i] &
tc 611 drivers/net/ethernet/intel/i40e/i40e_dcb.c dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
tc 676 drivers/net/ethernet/intel/i40e/i40e_dcb.c u8 i, tc, err, sync, oper;
tc 685 drivers/net/ethernet/intel/i40e/i40e_dcb.c tc = (u8)((cee_cfg->oper_prio_tc[i] &
tc 688 drivers/net/ethernet/intel/i40e/i40e_dcb.c dcbcfg->etscfg.prioritytable[i * 2] = tc;
tc 689 drivers/net/ethernet/intel/i40e/i40e_dcb.c tc = (u8)((cee_cfg->oper_prio_tc[i] &
tc 692 drivers/net/ethernet/intel/i40e/i40e_dcb.c dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
tc 5005 drivers/net/ethernet/intel/i40e/i40e_main.c u8 tc, i;
tc 5013 drivers/net/ethernet/intel/i40e/i40e_main.c tc = dcbcfg->etscfg.prioritytable[app.priority];
tc 5014 drivers/net/ethernet/intel/i40e/i40e_main.c enabled_tc |= BIT(tc);
tc 7949 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
tc 7955 drivers/net/ethernet/intel/i40e/i40e_main.c if (tc == 0) {
tc 7958 drivers/net/ethernet/intel/i40e/i40e_main.c } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
tc 7968 drivers/net/ethernet/intel/i40e/i40e_main.c if (ch->seid == vsi->tc_seid_map[tc])
tc 7986 drivers/net/ethernet/intel/i40e/i40e_main.c int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
tc 7991 drivers/net/ethernet/intel/i40e/i40e_main.c if (tc < 0) {
tc 8024 drivers/net/ethernet/intel/i40e/i40e_main.c err = i40e_handle_tclass(vsi, tc, filter);
tc 45 drivers/net/ethernet/intel/iavf/iavf_client.c params->qos.prio_qos[i].tc = 0;
tc 66 drivers/net/ethernet/intel/iavf/iavf_client.h u8 tc; /* TC mapped to prio */
tc 2935 drivers/net/ethernet/intel/iavf/iavf_main.c static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
tc 2938 drivers/net/ethernet/intel/iavf/iavf_main.c if (tc == 0)
tc 2940 drivers/net/ethernet/intel/iavf/iavf_main.c if (tc < adapter->num_tc) {
tc 2949 drivers/net/ethernet/intel/iavf/iavf_main.c filter->f.action_meta = tc;
tc 2961 drivers/net/ethernet/intel/iavf/iavf_main.c int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
tc 2965 drivers/net/ethernet/intel/iavf/iavf_main.c if (tc < 0) {
tc 2991 drivers/net/ethernet/intel/iavf/iavf_main.c err = iavf_handle_tclass(adapter, tc, filter);
tc 3159 drivers/net/ethernet/intel/ice/ice_common.c ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
tc 3167 drivers/net/ethernet/intel/ice/ice_common.c if (q_handle >= vsi->num_lan_q_entries[tc])
tc 3169 drivers/net/ethernet/intel/ice/ice_common.c if (!vsi->lan_q_ctx[tc])
tc 3171 drivers/net/ethernet/intel/ice/ice_common.c q_ctx = vsi->lan_q_ctx[tc];
tc 3189 drivers/net/ethernet/intel/ice/ice_common.c ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
tc 3212 drivers/net/ethernet/intel/ice/ice_common.c q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
tc 3221 drivers/net/ethernet/intel/ice/ice_common.c parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
tc 3280 drivers/net/ethernet/intel/ice/ice_common.c ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
tc 3312 drivers/net/ethernet/intel/ice/ice_common.c q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
tc 122 drivers/net/ethernet/intel/ice/ice_common.h ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
tc 130 drivers/net/ethernet/intel/ice/ice_common.h ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
tc 1724 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
tc 1750 drivers/net/ethernet/intel/ice/ice_lib.c status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
tc 1786 drivers/net/ethernet/intel/ice/ice_lib.c u8 tc;
tc 1795 drivers/net/ethernet/intel/ice/ice_lib.c ice_for_each_traffic_class(tc) {
tc 1796 drivers/net/ethernet/intel/ice/ice_lib.c if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
tc 1799 drivers/net/ethernet/intel/ice/ice_lib.c for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
tc 1801 drivers/net/ethernet/intel/ice/ice_lib.c qg_buf, tc);
tc 2187 drivers/net/ethernet/intel/ice/ice_lib.c txq_meta->tc, 1, &txq_meta->q_handle,
tc 2227 drivers/net/ethernet/intel/ice/ice_lib.c u8 tc = 0;
tc 2230 drivers/net/ethernet/intel/ice/ice_lib.c tc = ring->dcb_tc;
tc 2236 drivers/net/ethernet/intel/ice/ice_lib.c txq_meta->tc = tc;
tc 2252 drivers/net/ethernet/intel/ice/ice_lib.c u8 tc;
tc 2258 drivers/net/ethernet/intel/ice/ice_lib.c ice_for_each_traffic_class(tc) {
tc 2259 drivers/net/ethernet/intel/ice/ice_lib.c if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
tc 2262 drivers/net/ethernet/intel/ice/ice_lib.c for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
tc 19 drivers/net/ethernet/intel/ice/ice_lib.h u8 tc;
tc 283 drivers/net/ethernet/intel/ice/ice_sched.c struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
tc 290 drivers/net/ethernet/intel/ice/ice_sched.c if (pi->root->children[i]->tc_num == tc)
tc 523 drivers/net/ethernet/intel/ice/ice_sched.c ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
tc 532 drivers/net/ethernet/intel/ice/ice_sched.c if (!vsi_ctx->lan_q_ctx[tc]) {
tc 533 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
tc 537 drivers/net/ethernet/intel/ice/ice_sched.c if (!vsi_ctx->lan_q_ctx[tc])
tc 539 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->num_lan_q_entries[tc] = new_numqs;
tc 543 drivers/net/ethernet/intel/ice/ice_sched.c if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
tc 544 drivers/net/ethernet/intel/ice/ice_sched.c u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
tc 550 drivers/net/ethernet/intel/ice/ice_sched.c memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
tc 552 drivers/net/ethernet/intel/ice/ice_sched.c devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
tc 553 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->lan_q_ctx[tc] = q_ctx;
tc 554 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->num_lan_q_entries[tc] = new_numqs;
tc 1127 drivers/net/ethernet/intel/ice/ice_sched.c ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
tc 1141 drivers/net/ethernet/intel/ice/ice_sched.c vsi_node = vsi_ctx->sched.vsi_node[tc];
tc 1383 drivers/net/ethernet/intel/ice/ice_sched.c ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc 1389 drivers/net/ethernet/intel/ice/ice_sched.c tc_node = ice_sched_get_tc_node(pi, tc);
tc 1413 drivers/net/ethernet/intel/ice/ice_sched.c u8 tc, u16 new_numqs, u8 owner)
tc 1423 drivers/net/ethernet/intel/ice/ice_sched.c tc_node = ice_sched_get_tc_node(pi, tc);
tc 1435 drivers/net/ethernet/intel/ice/ice_sched.c prev_numqs = vsi_ctx->sched.max_lanq[tc];
tc 1439 drivers/net/ethernet/intel/ice/ice_sched.c status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
tc 1456 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->sched.max_lanq[tc] = new_numqs;
tc 1475 drivers/net/ethernet/intel/ice/ice_sched.c ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
tc 1484 drivers/net/ethernet/intel/ice/ice_sched.c tc_node = ice_sched_get_tc_node(pi, tc);
tc 1507 drivers/net/ethernet/intel/ice/ice_sched.c status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
tc 1515 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->sched.vsi_node[tc] = vsi_node;
tc 1521 drivers/net/ethernet/intel/ice/ice_sched.c vsi_ctx->sched.max_lanq[tc] = 0;
tc 1525 drivers/net/ethernet/intel/ice/ice_sched.c status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
tc 43 drivers/net/ethernet/intel/ice/ice_sched.h struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
tc 45 drivers/net/ethernet/intel/ice/ice_sched.h ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
tc 48 drivers/net/ethernet/intel/ice/ice_sched.h ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
tc 17 drivers/net/ethernet/intel/ice/ice_type.h static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
tc 19 drivers/net/ethernet/intel/ice/ice_type.h return test_bit(tc, &bitmap);
tc 558 drivers/net/ethernet/intel/igb/igb.h struct timecounter tc;
tc 2669 drivers/net/ethernet/intel/igb/igb_main.c int err, tc;
tc 2671 drivers/net/ethernet/intel/igb/igb_main.c tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
tc 2672 drivers/net/ethernet/intel/igb/igb_main.c if (tc < 0) {
tc 2681 drivers/net/ethernet/intel/igb/igb_main.c err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
tc 174 drivers/net/ethernet/intel/igb/igb_ptp.c ns = timecounter_cyc2time(&adapter->tc, systim);
tc 256 drivers/net/ethernet/intel/igb/igb_ptp.c timecounter_adjtime(&igb->tc, delta);
tc 298 drivers/net/ethernet/intel/igb/igb_ptp.c ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
tc 326 drivers/net/ethernet/intel/igb/igb_ptp.c ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
tc 369 drivers/net/ethernet/intel/igb/igb_ptp.c timecounter_init(&igb->tc, &igb->cc, ns);
tc 723 drivers/net/ethernet/intel/igb/igb_ptp.c ns = timecounter_read(&igb->tc);
tc 1357 drivers/net/ethernet/intel/igb/igb_ptp.c timecounter_init(&adapter->tc, &adapter->cc,
tc 915 drivers/net/ethernet/intel/ixgbe/ixgbe.h int ixgbe_setup_tc(struct net_device *dev, u8 tc);
tc 165 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c int tc;
tc 167 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
tc 168 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c if (tc_config[tc].dcb_pfc != pfc_disabled)
tc 169 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c *pfc_en |= BIT(tc);
tc 177 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c int tc;
tc 179 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
tc 180 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c refill[tc] = tc_config[tc].path[direction].data_credits_refill;
tc 186 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c int tc;
tc 188 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
tc 189 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c max[tc] = tc_config[tc].desc_credits_max;
tc 196 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c int tc;
tc 198 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
tc 199 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c bwgid[tc] = tc_config[tc].path[direction].bwg_id;
tc 206 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c int tc;
tc 208 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
tc 209 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c ptype[tc] = tc_config[tc].path[direction].prio_type;
tc 216 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c u8 tc = cfg->num_tcs.pg_tcs;
tc 219 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c if (!tc)
tc 227 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c for (tc--; tc; tc--) {
tc 228 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
tc 232 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c return tc;
tc 42 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h u8 tc;
tc 98 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h u8 tc; /* Traffic class (TC) */
tc 167 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
tc 174 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
tc 176 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
tc 178 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
tc 181 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
tc 193 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
tc 200 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
tc 202 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
tc 204 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
tc 207 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
tc 219 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
tc 225 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
tc 226 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
tc 227 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
tc 228 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
tc 239 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
tc 245 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
tc 246 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
tc 247 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
tc 248 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
tc 90 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
tc 102 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
tc 103 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
tc 116 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *rx = tc << 4;
tc 117 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c if (tc < 3)
tc 118 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = tc << 5; /* 0, 32, 64 */
tc 119 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c else if (tc < 5)
tc 120 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = (tc + 2) << 4; /* 80, 96 */
tc 122 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = (tc + 8) << 3; /* 104, 112, 120 */
tc 129 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *rx = tc << 5;
tc 130 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c if (tc < 2)
tc 131 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = tc << 6; /* 0, 64 */
tc 133 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c *tx = (tc + 4) << 4; /* 96, 112 */
tc 151 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c int tc, offset, rss_i, i;
tc 159 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
tc 160 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
tc 165 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->tx_ring[offset + i]->dcb_tc = tc;
tc 166 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c adapter->rx_ring[offset + i]->dcb_tc = tc;
tc 467 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c u8 tc = ixgbe_fcoe_get_tc(adapter);
tc 471 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c f->offset = rss_i * tc;
tc 964 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 tc;
tc 989 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = netdev_get_prio_tc_map(adapter->netdev, i);
tc 990 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xoff[tc] += pxoffrxc;
tc 997 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = tx_ring->dcb_tc;
tc 998 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (xoff[tc])
tc 1005 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = xdp_ring->dcb_tc;
tc 1006 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (xoff[tc])
tc 5104 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c int link, tc, kb, marker;
tc 5108 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
tc 5113 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
tc 5115 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
tc 5124 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dv_id = IXGBE_DV_X540(link, tc);
tc 5127 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dv_id = IXGBE_DV(link, tc);
tc 5133 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dv_id += IXGBE_B2BT(tc);
tc 5149 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c marker = tc + 1;
tc 5165 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c int tc;
tc 5169 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
tc 5174 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
tc 5176 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
tc 5185 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dv_id = IXGBE_LOW_DV_X540(tc);
tc 5188 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c dv_id = IXGBE_LOW_DV(tc);
tc 5225 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 tc = adapter->hw_tcs;
tc 5233 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
tc 6190 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_configuration *tc;
tc 6214 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = &adapter->dcb_cfg.tc_config[j];
tc 6215 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_TX_CONFIG].bwg_id = 0;
tc 6216 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
tc 6217 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_RX_CONFIG].bwg_id = 0;
tc 6218 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
tc 6219 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->dcb_pfc = pfc_disabled;
tc 6223 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = &adapter->dcb_cfg.tc_config[0];
tc 6224 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc 6225 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
tc 8510 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
tc 8513 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c txq = vdev->tc_to_txq[tc].offset;
tc 8515 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c vdev->tc_to_txq[tc].count);
tc 8985 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
tc 9004 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (up2tc > tc)
tc 9028 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u8 tc = 0;
tc 9031 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
tc 9033 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c tc = ets->prio_tc[prio];
tc 9035 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c netdev_set_prio_tc_map(dev, prio, tc);
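The ixgbe entries show both halves of priority handling: netdev_set_prio_tc_map() (tc 9035) stores the priority-to-TC table, and the transmit path reads it back with netdev_get_prio_tc_map() to pick a queue inside the TC's tc_to_txq range (tc 8510..8515). A sketch of the lookup half; spreading by flow hash is an assumption here, and ixgbe's own selection logic is more involved:

        static u16 select_queue_sketch(struct net_device *dev, struct sk_buff *skb)
        {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
                u16 txq = dev->tc_to_txq[tc].offset;    /* cf. tc 8513 */

                /* spread flows across the TC's queue range (assumed policy) */
                if (dev->tc_to_txq[tc].count > 1)
                        txq += reciprocal_scale(skb_get_hash(skb),
                                                dev->tc_to_txq[tc].count);
                return txq;
        }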
tc 9094 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c int ixgbe_setup_tc(struct net_device *dev, u8 tc)
tc 9100 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
tc 9103 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
tc 9118 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (tc) {
tc 9128 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c netdev_set_num_tc(dev, tc);
tc 9131 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->hw_tcs = tc;
tc 9145 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->hw_tcs = tc;
tc 9151 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_validate_rtr(adapter, tc);
tc 41 drivers/net/ethernet/mellanox/mlx4/en_clock.c static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
tc 44 drivers/net/ethernet/mellanox/mlx4/en_clock.c container_of(tc, struct mlx4_en_dev, cycles);
tc 47 drivers/net/ethernet/mellanox/mlx4/en_clock.c return mlx4_read_clock(dev) & tc->mask;
tc 167 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c int tc;
tc 171 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
tc 172 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c u8 tc_mask = 1 << tc;
tc 174 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c switch (priv->cee_config.dcb_pfc[tc]) {
tc 88 drivers/net/ethernet/mellanox/mlx4/en_netdev.c int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
tc 104 drivers/net/ethernet/mellanox/mlx4/en_netdev.c new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
tc 134 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_setup_tc(dev, tc);
tc 1525 drivers/net/ethernet/mellanox/mlx4/fw.c int ts = 0, tc = 0;
tc 1560 drivers/net/ethernet/mellanox/mlx4/fw.c ++tc;
tc 1581 drivers/net/ethernet/mellanox/mlx4/fw.c mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
tc 1584 drivers/net/ethernet/mellanox/mlx4/fw.c mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
tc 1588 drivers/net/ethernet/mellanox/mlx4/fw.c tc, ts, (unsigned long long) virt - (ts << 10));
tc 63 drivers/net/ethernet/mellanox/mlx4/fw_qos.c struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
tc 127 drivers/net/ethernet/mellanox/mlx4/fw_qos.c struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
tc 133 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->max_bw_units =
tc 137 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->max_bw_units =
tc 140 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->max_bw_value = htons(r);
tc 142 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
tc 143 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
tc 146 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->pg = htons(pg[i]);
tc 147 drivers/net/ethernet/mellanox/mlx4/fw_qos.c tc->bw_precentage = htons(tc_tx_bw[i]);
tc 777 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc);
tc 218 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h struct mlx5e_tc_table tc;
tc 21 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c int tc;
tc 27 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c for (tc = 0; tc < priv->max_opened_tc; tc++) {
tc 28 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c data->tx_packets += stats->sq[tc].packets;
tc 29 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c data->tx_bytes += stats->sq[tc].bytes;
tc 149 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq, int tc)
tc 168 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
tc 214 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c int i, tc, err = 0;
tc 259 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
tc 260 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq = &c->sq[tc];
tc 262 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
tc 517 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c int tc;
tc 523 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c for (tc = 0; tc < c->num_tc; tc++) {
tc 525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c &c->sq[tc].cq.mcq,
tc 1135 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc)
tc 1152 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
tc 1314 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc)
tc 1320 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
tc 1686 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 1688 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) {
tc 1690 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->sq[tc].cq);
tc 1698 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc--; tc >= 0; tc--)
tc 1699 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq);
tc 1706 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 1708 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++)
tc 1709 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq);
tc 1716 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int err, tc;
tc 1718 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < params->num_tc; tc++) {
tc 1719 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int txq_ix = c->ix + tc * params->num_channels;
tc 1721 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
tc 1722 drivers/net/ethernet/mellanox/mlx5/core/en_main.c params, &cparam->sq, &c->sq[tc], tc);
tc 1730 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc--; tc >= 0; tc--)
tc 1731 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]);
tc 1738 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 1740 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++)
tc 1741 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]);
tc 2034 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 2036 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++)
tc 2037 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_txqsq(&c->sq[tc]);
tc 2048 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 2055 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++)
tc 2056 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_txqsq(&c->sq[tc]);
tc 2885 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 2897 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < ntc; tc++)
tc 2898 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_set_tc_queue(netdev, tc, nch, 0);
tc 2930 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc;
tc 2932 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
tc 2934 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = &c->sq[tc];
tc 2937 drivers/net/ethernet/mellanox/mlx5/core/en_main.c priv->channel_tc2realtxq[i][tc] = i + tc * ch;
tc 3225
drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc, i; tc 3228 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < priv->profile->max_tc; tc++) tc 3229 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); tc 3239 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int tc, i; tc 3243 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < priv->profile->max_tc; tc++) { tc 3249 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(tisc, tisc, prio, tc << 1); tc 3254 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]); tc 3264 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc--; tc >= 0; tc--) tc 3265 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]); tc 3266 drivers/net/ethernet/mellanox/mlx5/core/en_main.c tc = priv->profile->max_tc; tc 3464 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u8 tc = mqprio->num_tc; tc 3469 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (tc && tc != MLX5E_MAX_NUM_TC) tc 3475 drivers/net/ethernet/mellanox/mlx5/core/en_main.c new_channels.params.num_tc = tc ? tc : 1; tc 471 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c int n, tc, num_sqs = 0; tc 481 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c for (tc = 0; tc < c->num_tc; tc++) tc 482 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c sqs[num_sqs++] = c->sq[tc].sqn; tc 1583 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c int i, j, tc; tc 1602 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c for (tc = 0; tc < priv->max_opened_tc; tc++) tc 1607 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c i + tc * max_nch); tc 1626 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c int i, j, tc; tc 1649 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c for (tc = 0; tc < priv->max_opened_tc; tc++) tc 1653 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], tc 305 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c &priv->fs.tc.mod_hdr; tc 702 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, tc 717 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock)) tc 720 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); tc 799 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_lock(&priv->fs.tc.hairpin_tbl_lock); tc 802 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); tc 814 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); tc 826 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, tc 828 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); tc 955 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_lock(&priv->fs.tc.t_lock); tc 956 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (IS_ERR_OR_NULL(priv->fs.tc.t)) { tc 968 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c priv->fs.tc.t = tc 974 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (IS_ERR(priv->fs.tc.t)) { tc 975 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.t_lock); tc 980 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c return PTR_ERR(priv->fs.tc.t); tc 987 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c flow->rule[0] = 
mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, tc 989 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.t_lock); tc 1005 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_lock(&priv->fs.tc.t_lock); tc 1006 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) { tc 1007 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5_destroy_flow_table(priv->fs.tc.t); tc 1008 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c priv->fs.tc.t = NULL; tc 1010 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.t_lock); tc 3500 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c return &priv->fs.tc.ht; tc 4064 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_lock(&priv->fs.tc.hairpin_tbl_lock); tc 4065 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) tc 4068 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); tc 4085 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct mlx5e_tc_table *tc; tc 4093 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); tc 4094 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c fs = container_of(tc, struct mlx5e_flow_steering, tc); tc 4108 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct mlx5e_tc_table *tc = &priv->fs.tc; tc 4111 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_init(&tc->t_lock); tc 4112 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_init(&tc->mod_hdr.lock); tc 4113 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_init(tc->mod_hdr.hlist); tc 4114 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_init(&tc->hairpin_tbl_lock); tc 4115 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c hash_init(tc->hairpin_tbl); tc 4117 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c err = rhashtable_init(&tc->ht, &tc_ht_params); tc 4121 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; tc 4122 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (register_netdevice_notifier(&tc->netdevice_nb)) { tc 4123 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tc->netdevice_nb.notifier_call = NULL; tc 4141 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct mlx5e_tc_table *tc = &priv->fs.tc; tc 4143 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (tc->netdevice_nb.notifier_call) tc 4144 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c unregister_netdevice_notifier(&tc->netdevice_nb); tc 4146 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_destroy(&tc->mod_hdr.lock); tc 4147 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_destroy(&tc->hairpin_tbl_lock); tc 4149 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c rhashtable_destroy(&tc->ht); tc 4151 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (!IS_ERR_OR_NULL(tc->t)) { tc 4152 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mlx5_destroy_flow_table(tc->t); tc 4153 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c tc->t = NULL; tc 4155 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c mutex_destroy(&tc->t_lock); tc 91 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c clock_info->cycles = clock->tc.cycle_last; tc 93 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c clock_info->nsec = clock->tc.nsec; tc 94 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c clock_info->frac = clock->tc.frac; tc 137 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 
timecounter_read(&clock->tc); tc 152 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c timecounter_init(&clock->tc, &clock->cycles, ns); tc 171 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ns = timecounter_cyc2time(&clock->tc, cycles); tc 186 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c timecounter_adjtime(&clock->tc, delta); tc 212 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c timecounter_read(&clock->tc); tc 333 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); tc 480 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ptp_event.timestamp = timecounter_cyc2time(&clock->tc, tc 499 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); tc 537 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c timecounter_init(&clock->tc, &clock->cycles, tc 558 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c mdev->clock_info->nsec = clock->tc.nsec; tc 559 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c mdev->clock_info->cycles = clock->tc.cycle_last; tc 563 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c mdev->clock_info->frac = clock->tc.frac; tc 53 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h nsec = timecounter_cyc2time(&clock->tc, timestamp); tc 598 drivers/net/ethernet/mellanox/mlx5/core/port.c u8 prio, u8 *tc) tc 613 drivers/net/ethernet/mellanox/mlx5/core/port.c *tc = MLX5_GET(qtct_reg, out, tclass); tc 659 drivers/net/ethernet/mellanox/mlx5/core/port.c u8 tc, u8 *tc_group) tc 670 drivers/net/ethernet/mellanox/mlx5/core/port.c tc_configuration[tc]); tc 694 drivers/net/ethernet/mellanox/mlx5/core/port.c u8 tc, u8 *bw_pct) tc 705 drivers/net/ethernet/mellanox/mlx5/core/port.c tc_configuration[tc]); tc 5517 drivers/net/ethernet/mellanox/mlxsw/reg.h u8 priority, u8 tc) tc 5535 drivers/net/ethernet/mellanox/mlxsw/reg.h mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, tc); tc 2319 drivers/net/ethernet/mellanox/mlxsw/spectrum.c static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) tc 2325 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port_hw_tc_stats[i].str, tc); tc 4527 drivers/net/ethernet/mellanox/mlxsw/spectrum.c u8 priority, tc; tc 4548 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = 5; tc 4553 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = 4; tc 4559 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = 3; tc 4566 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = 2; tc 4574 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = 1; tc 4578 drivers/net/ethernet/mellanox/mlxsw/spectrum.c tc = MLXSW_REG_HTGT_DEFAULT_TC; tc 4589 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); tc 65 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c struct timecounter tc; tc 114 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec) tc 118 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c cycles <<= tc->cc->shift; tc 119 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c cycles = div_u64(cycles, tc->cc->mult); tc 137 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec); tc 172 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c timecounter_read(&clock->tc); tc 187 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c timecounter_adjtime(&clock->tc, delta); tc 188 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c nsec = timecounter_read(&clock->tc); tc 204 
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c nsec = timecounter_cyc2time(&clock->tc, cycles); tc 220 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c timecounter_init(&clock->tc, &clock->cycles, nsec); tc 221 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c nsec = timecounter_read(&clock->tc); tc 245 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c timecounter_read(&clock->tc); tc 270 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c timecounter_init(&clock->tc, &clock->cycles, tc 463 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp); tc 235 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c u8 priority, tc, group_id; tc 243 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c tc = 1; tc 249 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc); tc 960 drivers/net/ethernet/mscc/ocelot.c port->tc.offload_cnt) { tc 501 drivers/net/ethernet/mscc/ocelot.h struct ocelot_port_tc tc; tc 205 drivers/net/ethernet/mscc/ocelot_flower.c port_block->port->tc.offload_cnt++; tc 223 drivers/net/ethernet/mscc/ocelot_flower.c port_block->port->tc.offload_cnt--; tc 37 drivers/net/ethernet/mscc/ocelot_tc.c if (port->tc.block_shared) { tc 50 drivers/net/ethernet/mscc/ocelot_tc.c if (port->tc.police_id && port->tc.police_id != f->cookie) { tc 67 drivers/net/ethernet/mscc/ocelot_tc.c port->tc.police_id = f->cookie; tc 68 drivers/net/ethernet/mscc/ocelot_tc.c port->tc.offload_cnt++; tc 71 drivers/net/ethernet/mscc/ocelot_tc.c if (port->tc.police_id != f->cookie) tc 80 drivers/net/ethernet/mscc/ocelot_tc.c port->tc.police_id = 0; tc 81 drivers/net/ethernet/mscc/ocelot_tc.c port->tc.offload_cnt--; tc 145 drivers/net/ethernet/mscc/ocelot_tc.c port->tc.block_shared = f->block_shared; tc 50 drivers/net/ethernet/netronome/nfp/flower/action.c if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET) tc 51 drivers/net/ethernet/netronome/nfp/flower/action.c mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT; tc 87 drivers/net/ethernet/netronome/nfp/flower/action.c if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) { tc 88 drivers/net/ethernet/netronome/nfp/flower/action.c mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT; tc 520 drivers/net/ethernet/qlogic/qed/qed.h u8 tc; tc 943 drivers/net/ethernet/qlogic/qed/qed.h u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); tc 945 drivers/net/ethernet/qlogic/qed/qed.h u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc); tc 946 drivers/net/ethernet/qlogic/qed/qed.h u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); tc 991 drivers/net/ethernet/qlogic/qed/qed.h void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); tc 187 drivers/net/ethernet/qlogic/qed/qed_dcbx.c p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc); tc 194 drivers/net/ethernet/qlogic/qed/qed_dcbx.c bool app_tlv, bool enable, u8 prio, u8 tc, tc 201 drivers/net/ethernet/qlogic/qed/qed_dcbx.c p_data->arr[type].tc = tc; tc 212 drivers/net/ethernet/qlogic/qed/qed_dcbx.c qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); tc 226 drivers/net/ethernet/qlogic/qed/qed_dcbx.c bool app_tlv, bool enable, u8 prio, u8 tc, tc 242 drivers/net/ethernet/qlogic/qed/qed_dcbx.c prio, tc, type, personality); tc 283 drivers/net/ethernet/qlogic/qed/qed_dcbx.c u8 tc, priority_map; tc 304 drivers/net/ethernet/qlogic/qed/qed_dcbx.c tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); tc 321 drivers/net/ethernet/qlogic/qed/qed_dcbx.c enable, priority, tc, type); tc 327 
drivers/net/ethernet/qlogic/qed/qed_dcbx.c p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; tc 335 drivers/net/ethernet/qlogic/qed/qed_dcbx.c tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; tc 343 drivers/net/ethernet/qlogic/qed/qed_dcbx.c priority, tc, type); tc 932 drivers/net/ethernet/qlogic/qed/qed_dcbx.c val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) | tc 933 drivers/net/ethernet/qlogic/qed/qed_dcbx.c (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc); tc 965 drivers/net/ethernet/qlogic/qed/qed_dcbx.c p_data->dcb_tc = p_src->arr[type].tc; tc 1376 drivers/net/ethernet/qlogic/qed/qed_dcbx.c static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type, tc 1382 drivers/net/ethernet/qlogic/qed/qed_dcbx.c DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc); tc 1384 drivers/net/ethernet/qlogic/qed/qed_dcbx.c if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { tc 1385 drivers/net/ethernet/qlogic/qed/qed_dcbx.c DP_INFO(hwfn, "Invalid tc %d\n", tc); tc 1393 drivers/net/ethernet/qlogic/qed/qed_dcbx.c *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc]; tc 1418 drivers/net/ethernet/qlogic/qed/qed_dcbx.c static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio, tc 1590 drivers/net/ethernet/qlogic/qed/qed_dcbx.c int tc, tc 1600 drivers/net/ethernet/qlogic/qed/qed_dcbx.c tc, pri_type, pgid, bw_pct, up_map); tc 1602 drivers/net/ethernet/qlogic/qed/qed_dcbx.c if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { tc 1603 drivers/net/ethernet/qlogic/qed/qed_dcbx.c DP_INFO(hwfn, "Invalid tc %d\n", tc); tc 1613 drivers/net/ethernet/qlogic/qed/qed_dcbx.c dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid; tc 57 drivers/net/ethernet/qlogic/qed/qed_dcbx.h u8 tc; /* Traffic Class */ tc 1645 drivers/net/ethernet/qlogic/qed/qed_dev.c void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc) tc 1647 drivers/net/ethernet/qlogic/qed/qed_dev.c p_info->offload_tc = tc; tc 1666 drivers/net/ethernet/qlogic/qed/qed_dev.c u8 tc, u32 pq_init_flags) tc 1678 drivers/net/ethernet/qlogic/qed/qed_dev.c qm_info->qm_pq_params[pq_idx].tc_id = tc; tc 1762 drivers/net/ethernet/qlogic/qed/qed_dev.c u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) tc 1772 drivers/net/ethernet/qlogic/qed/qed_dev.c if (tc > max_tc) tc 1773 drivers/net/ethernet/qlogic/qed/qed_dev.c DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); tc 1775 drivers/net/ethernet/qlogic/qed/qed_dev.c return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); tc 1794 drivers/net/ethernet/qlogic/qed/qed_dev.c u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) tc 1799 drivers/net/ethernet/qlogic/qed/qed_dev.c pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? tc 1800 drivers/net/ethernet/qlogic/qed/qed_dev.c tc : PQ_INIT_DEFAULT_TC; tc 1805 drivers/net/ethernet/qlogic/qed/qed_dev.c u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc) tc 1810 drivers/net/ethernet/qlogic/qed/qed_dev.c pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? 
tc 1811 drivers/net/ethernet/qlogic/qed/qed_dev.c tc : PQ_INIT_DEFAULT_TC; tc 1855 drivers/net/ethernet/qlogic/qed/qed_dev.c u8 tc; tc 1858 drivers/net/ethernet/qlogic/qed/qed_dev.c for (tc = 0; tc < num_tcs; tc++) tc 1861 drivers/net/ethernet/qlogic/qed/qed_dev.c p_hwfn->hw_info.offload_tc : tc, tc 1988 drivers/net/ethernet/qlogic/qed/qed_dev.c int i, tc; tc 2040 drivers/net/ethernet/qlogic/qed/qed_dev.c for (tc = 0; tc < NUM_OF_TCS; tc++) tc 2043 drivers/net/ethernet/qlogic/qed/qed_dev.c "%d ", vport->first_tx_pq_id[tc]); tc 297 drivers/net/ethernet/qlogic/qed/qed_hw.h u8 tc; tc 303 drivers/net/ethernet/qlogic/qed/qed_hw.h u8 tc; tc 201 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \ tc 202 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ tc 211 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 port_id, u8 tc, u8 max_phys_tcs_per_port) tc 213 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c if (tc == PURE_LB_TC) tc 216 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c return port_id * max_phys_tcs_per_port + tc; tc 320 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 tc, ext_voq, port_id, num_tcs_in_port; tc 342 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < max_phys_tcs_per_port; tc++) tc 344 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc) & 0x1) == 1) tc 349 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < max_phys_tcs_per_port; tc++) { tc 352 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc, max_phys_tcs_per_port); tc 354 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc) & 0x1) == 1) tc 376 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 tc, ext_voq, port_id, num_tcs_in_port; tc 390 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) tc 392 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc) & 0x1) == 1) tc 404 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { tc 406 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc) & 0x1) == 1) { tc 410 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c tc, tc 677 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 tc, i; tc 692 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < NUM_OF_TCS; tc++) { tc 693 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c vport_pq_id = vport_params[i].first_tx_pq_id[tc]; tc 856 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 tc, i; tc 860 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < NUM_OF_TCS; tc++) tc 861 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; tc 932 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c u8 tc; tc 940 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c for (tc = 0; tc < NUM_OF_TCS; tc++) { tc 941 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c vport_pq_id = first_tx_pq_id[tc]; tc 1128 drivers/net/ethernet/qlogic/qed/qed_l2.c u8 tc, tc 1137 drivers/net/ethernet/qlogic/qed/qed_l2.c qed_get_cm_pq_idx_mcos(p_hwfn, tc)); tc 1152 drivers/net/ethernet/qlogic/qed/qed_l2.c u8 tc, tc 1165 drivers/net/ethernet/qlogic/qed/qed_l2.c rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, tc 2572 drivers/net/ethernet/qlogic/qed/qed_l2.c p_params, p_params->tc, tc 1738 drivers/net/ethernet/qlogic/qed/qed_mcp.c p_hwfn->ufp_info.tc = (u8)val; tc 1754 drivers/net/ethernet/qlogic/qed/qed_mcp.c 
p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, tc 1764 drivers/net/ethernet/qlogic/qed/qed_mcp.c p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; tc 1766 drivers/net/ethernet/qlogic/qed/qed_mcp.c p_hwfn->ufp_info.tc); tc 231 drivers/net/ethernet/qlogic/qed/qed_roce.c u8 pri, tc = 0; tc 235 drivers/net/ethernet/qlogic/qed/qed_roce.c tc = qed_dcbx_get_priority_tc(p_hwfn, pri); tc 240 drivers/net/ethernet/qlogic/qed/qed_roce.c qp->icid, tc, qp->vlan_id ? "enabled" : "disabled"); tc 242 drivers/net/ethernet/qlogic/qed/qed_roce.c return tc; tc 255 drivers/net/ethernet/qlogic/qed/qed_roce.c u8 tc; tc 339 drivers/net/ethernet/qlogic/qed/qed_roce.c tc = qed_roce_get_qp_tc(p_hwfn, qp); tc 340 drivers/net/ethernet/qlogic/qed/qed_roce.c regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); tc 341 drivers/net/ethernet/qlogic/qed/qed_roce.c low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); tc 396 drivers/net/ethernet/qlogic/qed/qed_roce.c u8 tc; tc 469 drivers/net/ethernet/qlogic/qed/qed_roce.c tc = qed_roce_get_qp_tc(p_hwfn, qp); tc 470 drivers/net/ethernet/qlogic/qed/qed_roce.c regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); tc 471 drivers/net/ethernet/qlogic/qed/qed_roce.c low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); tc 388 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); tc 609 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); tc 2002 drivers/net/ethernet/qlogic/qede/qede_main.c params.tc = txq->cos; tc 39 drivers/net/ethernet/qlogic/qede/qede_ptp.c struct timecounter tc; tc 95 drivers/net/ethernet/qlogic/qede/qede_ptp.c timecounter_adjtime(&ptp->tc, delta); tc 111 drivers/net/ethernet/qlogic/qede/qede_ptp.c ns = timecounter_read(&ptp->tc); tc 137 drivers/net/ethernet/qlogic/qede/qede_ptp.c timecounter_init(&ptp->tc, &ptp->cc, ns); tc 192 drivers/net/ethernet/qlogic/qede/qede_ptp.c ns = timecounter_cyc2time(&ptp->tc, timestamp); tc 457 drivers/net/ethernet/qlogic/qede/qede_ptp.c timecounter_init(&ptp->tc, &ptp->cc, tc 575 drivers/net/ethernet/qlogic/qede/qede_ptp.c ns = timecounter_cyc2time(&ptp->tc, timestamp); tc 661 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c u8 i, tc, pgid; tc 664 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i); tc 665 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c tc_cfg = &type->tc_cfg[tc]; tc 793 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, tc 808 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) tc 811 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c tc_cfg = &type->tc_cfg[tc]; tc 1101 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c struct qlcnic_dcb_tc_cfg *tc; tc 1113 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c tc = &peer->tc_cfg[i]; tc 1114 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c prio = qlcnic_dcb_prio_count(tc->up_tc_map); tc 432 drivers/net/ethernet/sfc/falcon/tx.c unsigned tc, num_tc; tc 448 drivers/net/ethernet/sfc/falcon/tx.c for (tc = 0; tc < num_tc; tc++) { tc 449 drivers/net/ethernet/sfc/falcon/tx.c net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; tc 450 drivers/net/ethernet/sfc/falcon/tx.c net_dev->tc_to_txq[tc].count = efx->n_tx_channels; tc 689 drivers/net/ethernet/sfc/tx.c unsigned tc, num_tc; tc 705 drivers/net/ethernet/sfc/tx.c for (tc = 0; tc < num_tc; tc++) { tc 706 drivers/net/ethernet/sfc/tx.c net_dev->tc_to_txq[tc].offset = tc * 
efx->n_tx_channels; tc 707 drivers/net/ethernet/sfc/tx.c net_dev->tc_to_txq[tc].count = efx->n_tx_channels; tc 440 drivers/net/ethernet/stmicro/stmmac/common.h const struct stmmac_tc_ops *tc; tc 83 drivers/net/ethernet/stmicro/stmmac/hwif.c const void *tc; tc 103 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = NULL, tc 121 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = NULL, tc 139 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = &dwmac510_tc_ops, tc 157 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = &dwmac510_tc_ops, tc 175 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = &dwmac510_tc_ops, tc 193 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = &dwmac510_tc_ops, tc 211 drivers/net/ethernet/stmicro/stmmac/hwif.c .tc = &dwmac510_tc_ops, tc 277 drivers/net/ethernet/stmicro/stmmac/hwif.c mac->tc = mac->tc ? : entry->tc; tc 529 drivers/net/ethernet/stmicro/stmmac/hwif.h stmmac_do_callback(__priv, tc, init, __args) tc 531 drivers/net/ethernet/stmicro/stmmac/hwif.h stmmac_do_callback(__priv, tc, setup_cls_u32, __args) tc 533 drivers/net/ethernet/stmicro/stmmac/hwif.h stmmac_do_callback(__priv, tc, setup_cbs, __args) tc 535 drivers/net/ethernet/stmicro/stmmac/hwif.h stmmac_do_callback(__priv, tc, setup_cls, __args) tc 77 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static int tc = TC_DEFAULT; tc 78 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c module_param(tc, int, 0644); tc 79 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c MODULE_PARM_DESC(tc, "DMA threshold control value"); tc 1828 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c txmode = tc; tc 1829 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c rxmode = tc; tc 1842 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c txmode = tc; tc 2103 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (tc <= 256)) { tc 2104 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c tc += 64; tc 2107 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c tc, tc 2108 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c tc, tc 2112 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c tc, tc 2115 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c priv->xstats.threshold = tc; tc 2667 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c priv->xstats.threshold = tc; tc 4920 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c if (kstrtoint(opt + 3, 0, &tc)) tc 1413 drivers/net/ethernet/ti/cpsw.c static int cpsw_tc_to_fifo(int tc, int num_tc) tc 1415 drivers/net/ethernet/ti/cpsw.c if (tc == num_tc - 1) tc 1418 drivers/net/ethernet/ti/cpsw.c return CPSW_FIFO_SHAPERS_NUM - tc; tc 1538 drivers/net/ethernet/ti/cpsw.c int tc, ret, fifo; tc 1541 drivers/net/ethernet/ti/cpsw.c tc = netdev_txq_to_tc(priv->ndev, qopt->queue); tc 1547 drivers/net/ethernet/ti/cpsw.c fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); tc 1549 drivers/net/ethernet/ti/cpsw.c dev_err(priv->dev, "Last tc%d can't be rate limited", tc); tc 1610 drivers/net/ethernet/ti/cpsw.c int i, tc, fifo; tc 1617 drivers/net/ethernet/ti/cpsw.c tc = netdev_get_prio_tc_map(priv->ndev, i); tc 1618 drivers/net/ethernet/ti/cpsw.c fifo = CPSW_FIFO_SHAPERS_NUM - tc; tc 2277 drivers/net/ethernet/ti/cpsw.c int i, tc, ret; tc 2294 drivers/net/ethernet/ti/cpsw.c tc = mqprio->qopt.prio_tc_map[i]; tc 2295 drivers/net/ethernet/ti/cpsw.c fifo = cpsw_tc_to_fifo(tc, num_tc); tc 115 drivers/net/ethernet/ti/cpts.c u64 ns = timecounter_cyc2time(&cpts->tc, event->low); tc 235 drivers/net/ethernet/ti/cpts.c timecounter_read(&cpts->tc); tc 250 drivers/net/ethernet/ti/cpts.c timecounter_adjtime(&cpts->tc, delta); tc 263 drivers/net/ethernet/ti/cpts.c ns = 
timecounter_read(&cpts->tc); tc 281 drivers/net/ethernet/ti/cpts.c timecounter_init(&cpts->tc, &cpts->cc, ns); tc 301 drivers/net/ethernet/ti/cpts.c ts = ns_to_timespec64(timecounter_read(&cpts->tc)); tc 393 drivers/net/ethernet/ti/cpts.c ns = timecounter_cyc2time(&cpts->tc, event->low); tc 462 drivers/net/ethernet/ti/cpts.c timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real())); tc 109 drivers/net/ethernet/ti/cpts.h struct timecounter tc; tc 3798 drivers/net/fddi/defxx.c MODULE_DEVICE_TABLE(tc, dfx_tc_table); tc 1543 drivers/net/fddi/defza.c MODULE_DEVICE_TABLE(tc, fza_tc_table); tc 719 drivers/net/hamradio/scc.c static inline void set_brg(struct scc_channel *scc, unsigned int tc) tc 722 drivers/net/hamradio/scc.c wr(scc,R12,tc & 255); /* brg rate LOW */ tc 723 drivers/net/hamradio/scc.c wr(scc,R13,tc >> 8); /* brg rate HIGH */ tc 186 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c u8 tc; tc 208 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 233 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 239 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 244 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 291 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = ctx; tc 296 drivers/ntb/test/ntb_tool.c up = ntb_link_is_up(tc->ntb, &speed, &width); tc 298 drivers/ntb/test/ntb_tool.c dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n", tc 301 drivers/ntb/test/ntb_tool.c wake_up(&tc->link_wq); tc 306 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = ctx; tc 309 drivers/ntb/test/ntb_tool.c db_mask = ntb_db_vector_mask(tc->ntb, vec); tc 310 drivers/ntb/test/ntb_tool.c db_bits = ntb_db_read(tc->ntb); tc 312 drivers/ntb/test/ntb_tool.c dev_dbg(&tc->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n", tc 315 drivers/ntb/test/ntb_tool.c wake_up(&tc->db_wq); tc 320 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = ctx; tc 323 drivers/ntb/test/ntb_tool.c msg_sts = ntb_msg_read_sts(tc->ntb); tc 325 drivers/ntb/test/ntb_tool.c dev_dbg(&tc->ntb->dev, "message bits %#llx\n", msg_sts); tc 327 drivers/ntb/test/ntb_tool.c wake_up(&tc->msg_wq); tc 341 drivers/ntb/test/ntb_tool.c static ssize_t tool_fn_read(struct tool_ctx *tc, char __user *ubuf, tc 354 drivers/ntb/test/ntb_tool.c pos = scnprintf(buf, buf_size, "%#llx\n", fn_read(tc->ntb)); tc 359 drivers/ntb/test/ntb_tool.c static ssize_t tool_fn_write(struct tool_ctx *tc, tc 392 drivers/ntb/test/ntb_tool.c ret = fn_set(tc->ntb, bits); tc 397 drivers/ntb/test/ntb_tool.c ret = fn_clear(tc->ntb, bits); tc 413 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 417 drivers/ntb/test/ntb_tool.c pos = scnprintf(buf, sizeof(buf), "%d\n", ntb_port_number(tc->ntb)); tc 430 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = peer->tc; tc 435 drivers/ntb/test/ntb_tool.c ntb_peer_port_number(tc->ntb, peer->pidx)); tc 444 drivers/ntb/test/ntb_tool.c static int tool_init_peers(struct tool_ctx *tc) tc 448 drivers/ntb/test/ntb_tool.c tc->peer_cnt = ntb_peer_port_count(tc->ntb); tc 449 drivers/ntb/test/ntb_tool.c tc->peers = devm_kcalloc(&tc->ntb->dev, tc->peer_cnt, tc 450 drivers/ntb/test/ntb_tool.c sizeof(*tc->peers), GFP_KERNEL); tc 451 drivers/ntb/test/ntb_tool.c if (tc->peers == NULL) tc 454 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) { tc 455 drivers/ntb/test/ntb_tool.c tc->peers[pidx].pidx = pidx; tc 456 drivers/ntb/test/ntb_tool.c tc->peers[pidx].tc = tc; tc 470 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 479 drivers/ntb/test/ntb_tool.c ret = 
ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); tc 481 drivers/ntb/test/ntb_tool.c ret = ntb_link_disable(tc->ntb); tc 497 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = peer->tc; tc 500 drivers/ntb/test/ntb_tool.c if (ntb_link_is_up(tc->ntb, NULL, NULL) & BIT(peer->pidx)) tc 519 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = peer->tc; tc 530 drivers/ntb/test/ntb_tool.c if (wait_event_interruptible(tc->link_wq, tc 531 drivers/ntb/test/ntb_tool.c !!(ntb_link_is_up(tc->ntb, NULL, NULL) & link_msk) == val)) tc 574 drivers/ntb/test/ntb_tool.c static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, tc 578 drivers/ntb/test/ntb_tool.c struct tool_mw *inmw = &tc->peers[pidx].inmws[widx]; tc 585 drivers/ntb/test/ntb_tool.c ret = ntb_mw_get_align(tc->ntb, pidx, widx, &addr_align, tc 593 drivers/ntb/test/ntb_tool.c inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size, tc 603 drivers/ntb/test/ntb_tool.c ret = ntb_mw_set_trans(tc->ntb, pidx, widx, inmw->dma_base, inmw->size); tc 609 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, inmw, tc 615 drivers/ntb/test/ntb_tool.c dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base, tc 624 drivers/ntb/test/ntb_tool.c static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx) tc 626 drivers/ntb/test/ntb_tool.c struct tool_mw *inmw = &tc->peers[pidx].inmws[widx]; tc 631 drivers/ntb/test/ntb_tool.c ntb_mw_clear_trans(tc->ntb, pidx, widx); tc 632 drivers/ntb/test/ntb_tool.c dma_free_coherent(&tc->ntb->dev, inmw->size, tc 659 drivers/ntb/test/ntb_tool.c ret = ntb_mw_get_align(inmw->tc->ntb, inmw->pidx, inmw->widx, tc 670 drivers/ntb/test/ntb_tool.c ntb_peer_port_number(inmw->tc->ntb, inmw->pidx), tc 715 drivers/ntb/test/ntb_tool.c tool_free_mw(inmw->tc, inmw->pidx, inmw->widx); tc 717 drivers/ntb/test/ntb_tool.c ret = tool_setup_mw(inmw->tc, inmw->pidx, inmw->widx, val); tc 809 drivers/ntb/test/ntb_tool.c static int tool_setup_peer_mw(struct tool_ctx *tc, int pidx, int widx, tc 812 drivers/ntb/test/ntb_tool.c struct tool_mw *outmw = &tc->outmws[widx]; tc 821 drivers/ntb/test/ntb_tool.c ret = ntb_peer_mw_get_addr(tc->ntb, widx, &map_base, &map_size); tc 825 drivers/ntb/test/ntb_tool.c ret = ntb_peer_mw_set_trans(tc->ntb, pidx, widx, req_addr, req_size); tc 841 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, outmw, tc 847 drivers/ntb/test/ntb_tool.c ntb_peer_mw_clear_trans(tc->ntb, pidx, widx); tc 852 drivers/ntb/test/ntb_tool.c static void tool_free_peer_mw(struct tool_ctx *tc, int widx) tc 854 drivers/ntb/test/ntb_tool.c struct tool_mw *outmw = &tc->outmws[widx]; tc 859 drivers/ntb/test/ntb_tool.c iounmap(tc->outmws[widx].io_base); tc 860 drivers/ntb/test/ntb_tool.c ntb_peer_mw_clear_trans(tc->ntb, outmw->pidx, widx); tc 882 drivers/ntb/test/ntb_tool.c ret = ntb_peer_mw_get_addr(outmw->tc->ntb, outmw->widx, tc 899 drivers/ntb/test/ntb_tool.c ntb_peer_port_number(outmw->tc->ntb, outmw->pidx), tc 948 drivers/ntb/test/ntb_tool.c tool_free_peer_mw(outmw->tc, outmw->widx); tc 950 drivers/ntb/test/ntb_tool.c ret = tool_setup_peer_mw(outmw->tc, outmw_wrap->pidx, tc 963 drivers/ntb/test/ntb_tool.c static int tool_init_mws(struct tool_ctx *tc) tc 968 drivers/ntb/test/ntb_tool.c tc->outmw_cnt = ntb_peer_mw_count(tc->ntb); tc 969 drivers/ntb/test/ntb_tool.c tc->outmws = devm_kcalloc(&tc->ntb->dev, tc->outmw_cnt, tc 970 drivers/ntb/test/ntb_tool.c sizeof(*tc->outmws), GFP_KERNEL); tc 971 drivers/ntb/test/ntb_tool.c if (tc->outmws == NULL) tc 974 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->outmw_cnt; widx++) { 
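[Note] The drivers/ntb/test/ntb_tool.c hits around this point (tc 341, tc 354, tc 359, tc 392, tc 397) all funnel through one debugfs helper pattern: tool_fn_read() formats a 64-bit register value with scnprintf() and copies it out with simple_read_from_buffer(), while tool_fn_write() parses the user string and dispatches to a set/clear op pair. A minimal sketch of the read side follows, reconstructed from the scnprintf line quoted at the tc 354 hit; the reduced tool_ctx, the 32-byte buffer and the -EINVAL check are plausible assumptions, not verbatim source.

    #include <linux/fs.h>      /* simple_read_from_buffer() */
    #include <linux/kernel.h>  /* scnprintf(), min() */
    #include <linux/ntb.h>     /* struct ntb_dev */

    /* Reduced sketch of the context; the real struct tool_ctx in
     * ntb_tool.c also carries wait queues, peers, spads, msgs, ... */
    struct tool_ctx {
    	struct ntb_dev *ntb;
    };

    static ssize_t tool_fn_read(struct tool_ctx *tc, char __user *ubuf,
    			    size_t size, loff_t *offp,
    			    u64 (*fn_read)(struct ntb_dev *ntb))
    {
    	char buf[32];
    	ssize_t pos;

    	if (!fn_read)
    		return -EINVAL;

    	/* cf. the scnprintf line quoted at the tc 354 hit above */
    	pos = scnprintf(buf, min(size, sizeof(buf)), "%#llx\n",
    			fn_read(tc->ntb));

    	return simple_read_from_buffer(ubuf, size, offp, buf, pos);
    }

Every db/spad/msg debugfs file in the listing (tool_db_read, tool_peer_db_read, tool_msg_sts_read, ...) is then a one-line wrapper passing a different ops callback, which is why the same helper appears at so many hit lines.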
tc 975 drivers/ntb/test/ntb_tool.c tc->outmws[widx].widx = widx; tc 976 drivers/ntb/test/ntb_tool.c tc->outmws[widx].pidx = -1; tc 977 drivers/ntb/test/ntb_tool.c tc->outmws[widx].tc = tc; tc 981 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) { tc 982 drivers/ntb/test/ntb_tool.c tc->peers[pidx].inmw_cnt = ntb_mw_count(tc->ntb, pidx); tc 983 drivers/ntb/test/ntb_tool.c tc->peers[pidx].inmws = tc 984 drivers/ntb/test/ntb_tool.c devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].inmw_cnt, tc 985 drivers/ntb/test/ntb_tool.c sizeof(*tc->peers[pidx].inmws), GFP_KERNEL); tc 986 drivers/ntb/test/ntb_tool.c if (tc->peers[pidx].inmws == NULL) tc 989 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->peers[pidx].inmw_cnt; widx++) { tc 990 drivers/ntb/test/ntb_tool.c tc->peers[pidx].inmws[widx].widx = widx; tc 991 drivers/ntb/test/ntb_tool.c tc->peers[pidx].inmws[widx].pidx = pidx; tc 992 drivers/ntb/test/ntb_tool.c tc->peers[pidx].inmws[widx].tc = tc; tc 995 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmw_cnt = ntb_peer_mw_count(tc->ntb); tc 996 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmws = tc 997 drivers/ntb/test/ntb_tool.c devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt, tc 998 drivers/ntb/test/ntb_tool.c sizeof(*tc->peers[pidx].outmws), GFP_KERNEL); tc 1000 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) { tc 1001 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmws[widx].pidx = pidx; tc 1002 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmws[widx].mw = &tc->outmws[widx]; tc 1009 drivers/ntb/test/ntb_tool.c static void tool_clear_mws(struct tool_ctx *tc) tc 1014 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->outmw_cnt; widx++) tc 1015 drivers/ntb/test/ntb_tool.c tool_free_peer_mw(tc, widx); tc 1018 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) tc 1019 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->peers[pidx].inmw_cnt; widx++) tc 1020 drivers/ntb/test/ntb_tool.c tool_free_mw(tc, pidx, widx); tc 1031 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1033 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_read); tc 1039 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1041 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->db_set, tc 1042 drivers/ntb/test/ntb_tool.c tc->ntb->ops->db_clear); tc 1052 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1054 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_valid_mask); tc 1064 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1066 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->db_read_mask); tc 1072 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1074 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->db_set_mask, tc 1075 drivers/ntb/test/ntb_tool.c tc->ntb->ops->db_clear_mask); tc 1085 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1087 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->peer_db_read); tc 1093 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1095 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, tc->ntb->ops->peer_db_set, tc 1096 drivers/ntb/test/ntb_tool.c tc->ntb->ops->peer_db_clear); tc 1106 drivers/ntb/test/ntb_tool.c struct 
tool_ctx *tc = filep->private_data; tc 1108 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc 1109 drivers/ntb/test/ntb_tool.c tc->ntb->ops->peer_db_read_mask); tc 1116 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1118 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, tc 1119 drivers/ntb/test/ntb_tool.c tc->ntb->ops->peer_db_set_mask, tc 1120 drivers/ntb/test/ntb_tool.c tc->ntb->ops->peer_db_clear_mask); tc 1131 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1139 drivers/ntb/test/ntb_tool.c if (wait_event_interruptible(tc->db_wq, ntb_db_read(tc->ntb) == val)) tc 1161 drivers/ntb/test/ntb_tool.c if (!spad->tc->ntb->ops->spad_read) tc 1165 drivers/ntb/test/ntb_tool.c ntb_spad_read(spad->tc->ntb, spad->sidx)); tc 1177 drivers/ntb/test/ntb_tool.c if (!spad->tc->ntb->ops->spad_write) { tc 1178 drivers/ntb/test/ntb_tool.c dev_dbg(&spad->tc->ntb->dev, "no spad write fn\n"); tc 1186 drivers/ntb/test/ntb_tool.c ret = ntb_spad_write(spad->tc->ntb, spad->sidx, val); tc 1202 drivers/ntb/test/ntb_tool.c if (!spad->tc->ntb->ops->peer_spad_read) tc 1206 drivers/ntb/test/ntb_tool.c ntb_peer_spad_read(spad->tc->ntb, spad->pidx, spad->sidx)); tc 1218 drivers/ntb/test/ntb_tool.c if (!spad->tc->ntb->ops->peer_spad_write) { tc 1219 drivers/ntb/test/ntb_tool.c dev_dbg(&spad->tc->ntb->dev, "no spad write fn\n"); tc 1227 drivers/ntb/test/ntb_tool.c ret = ntb_peer_spad_write(spad->tc->ntb, spad->pidx, spad->sidx, val); tc 1236 drivers/ntb/test/ntb_tool.c static int tool_init_spads(struct tool_ctx *tc) tc 1241 drivers/ntb/test/ntb_tool.c tc->inspad_cnt = ntb_spad_count(tc->ntb); tc 1242 drivers/ntb/test/ntb_tool.c tc->inspads = devm_kcalloc(&tc->ntb->dev, tc->inspad_cnt, tc 1243 drivers/ntb/test/ntb_tool.c sizeof(*tc->inspads), GFP_KERNEL); tc 1244 drivers/ntb/test/ntb_tool.c if (tc->inspads == NULL) tc 1247 drivers/ntb/test/ntb_tool.c for (sidx = 0; sidx < tc->inspad_cnt; sidx++) { tc 1248 drivers/ntb/test/ntb_tool.c tc->inspads[sidx].sidx = sidx; tc 1249 drivers/ntb/test/ntb_tool.c tc->inspads[sidx].pidx = -1; tc 1250 drivers/ntb/test/ntb_tool.c tc->inspads[sidx].tc = tc; tc 1254 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) { tc 1255 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outspad_cnt = ntb_spad_count(tc->ntb); tc 1256 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outspads = tc 1257 drivers/ntb/test/ntb_tool.c devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outspad_cnt, tc 1258 drivers/ntb/test/ntb_tool.c sizeof(*tc->peers[pidx].outspads), GFP_KERNEL); tc 1259 drivers/ntb/test/ntb_tool.c if (tc->peers[pidx].outspads == NULL) tc 1262 drivers/ntb/test/ntb_tool.c for (sidx = 0; sidx < tc->peers[pidx].outspad_cnt; sidx++) { tc 1263 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outspads[sidx].sidx = sidx; tc 1264 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outspads[sidx].pidx = pidx; tc 1265 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outspads[sidx].tc = tc; tc 1286 drivers/ntb/test/ntb_tool.c data = ntb_msg_read(msg->tc->ntb, &pidx, msg->midx); tc 1309 drivers/ntb/test/ntb_tool.c ret = ntb_peer_msg_write(msg->tc->ntb, msg->pidx, msg->midx, val); tc 1321 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1323 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_read_sts); tc 1329 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1331 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, NULL, tc 1332 
drivers/ntb/test/ntb_tool.c tc->ntb->ops->msg_clear_sts); tc 1342 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1344 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_inbits); tc 1354 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1356 drivers/ntb/test/ntb_tool.c return tool_fn_read(tc, ubuf, size, offp, tc->ntb->ops->msg_outbits); tc 1366 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1368 drivers/ntb/test/ntb_tool.c return tool_fn_write(tc, ubuf, size, offp, tc 1369 drivers/ntb/test/ntb_tool.c tc->ntb->ops->msg_set_mask, tc 1370 drivers/ntb/test/ntb_tool.c tc->ntb->ops->msg_clear_mask); tc 1381 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = filep->private_data; tc 1389 drivers/ntb/test/ntb_tool.c if (wait_event_interruptible(tc->msg_wq, tc 1390 drivers/ntb/test/ntb_tool.c ntb_msg_read_sts(tc->ntb) == val)) tc 1400 drivers/ntb/test/ntb_tool.c static int tool_init_msgs(struct tool_ctx *tc) tc 1405 drivers/ntb/test/ntb_tool.c tc->inmsg_cnt = ntb_msg_count(tc->ntb); tc 1406 drivers/ntb/test/ntb_tool.c tc->inmsgs = devm_kcalloc(&tc->ntb->dev, tc->inmsg_cnt, tc 1407 drivers/ntb/test/ntb_tool.c sizeof(*tc->inmsgs), GFP_KERNEL); tc 1408 drivers/ntb/test/ntb_tool.c if (tc->inmsgs == NULL) tc 1411 drivers/ntb/test/ntb_tool.c for (midx = 0; midx < tc->inmsg_cnt; midx++) { tc 1412 drivers/ntb/test/ntb_tool.c tc->inmsgs[midx].midx = midx; tc 1413 drivers/ntb/test/ntb_tool.c tc->inmsgs[midx].pidx = -1; tc 1414 drivers/ntb/test/ntb_tool.c tc->inmsgs[midx].tc = tc; tc 1418 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) { tc 1419 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmsg_cnt = ntb_msg_count(tc->ntb); tc 1420 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmsgs = tc 1421 drivers/ntb/test/ntb_tool.c devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmsg_cnt, tc 1422 drivers/ntb/test/ntb_tool.c sizeof(*tc->peers[pidx].outmsgs), GFP_KERNEL); tc 1423 drivers/ntb/test/ntb_tool.c if (tc->peers[pidx].outmsgs == NULL) tc 1426 drivers/ntb/test/ntb_tool.c for (midx = 0; midx < tc->peers[pidx].outmsg_cnt; midx++) { tc 1427 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmsgs[midx].midx = midx; tc 1428 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmsgs[midx].pidx = pidx; tc 1429 drivers/ntb/test/ntb_tool.c tc->peers[pidx].outmsgs[midx].tc = tc; tc 1443 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 1445 drivers/ntb/test/ntb_tool.c tc = devm_kzalloc(&ntb->dev, sizeof(*tc), GFP_KERNEL); tc 1446 drivers/ntb/test/ntb_tool.c if (tc == NULL) tc 1449 drivers/ntb/test/ntb_tool.c tc->ntb = ntb; tc 1450 drivers/ntb/test/ntb_tool.c init_waitqueue_head(&tc->link_wq); tc 1451 drivers/ntb/test/ntb_tool.c init_waitqueue_head(&tc->db_wq); tc 1452 drivers/ntb/test/ntb_tool.c init_waitqueue_head(&tc->msg_wq); tc 1460 drivers/ntb/test/ntb_tool.c return tc; tc 1463 drivers/ntb/test/ntb_tool.c static void tool_clear_data(struct tool_ctx *tc) tc 1465 drivers/ntb/test/ntb_tool.c wake_up(&tc->link_wq); tc 1466 drivers/ntb/test/ntb_tool.c wake_up(&tc->db_wq); tc 1467 drivers/ntb/test/ntb_tool.c wake_up(&tc->msg_wq); tc 1470 drivers/ntb/test/ntb_tool.c static int tool_init_ntb(struct tool_ctx *tc) tc 1472 drivers/ntb/test/ntb_tool.c return ntb_set_ctx(tc->ntb, tc, &tool_ops); tc 1475 drivers/ntb/test/ntb_tool.c static void tool_clear_ntb(struct tool_ctx *tc) tc 1477 drivers/ntb/test/ntb_tool.c ntb_clear_ctx(tc->ntb); tc 1478 drivers/ntb/test/ntb_tool.c ntb_link_disable(tc->ntb); tc 1481 
drivers/ntb/test/ntb_tool.c static void tool_setup_dbgfs(struct tool_ctx *tc) tc 1488 drivers/ntb/test/ntb_tool.c tc->dbgfs_dir = NULL; tc 1492 drivers/ntb/test/ntb_tool.c tc->dbgfs_dir = debugfs_create_dir(dev_name(&tc->ntb->dev), tc 1494 drivers/ntb/test/ntb_tool.c if (!tc->dbgfs_dir) tc 1497 drivers/ntb/test/ntb_tool.c debugfs_create_file("port", 0600, tc->dbgfs_dir, tc 1498 drivers/ntb/test/ntb_tool.c tc, &tool_port_fops); tc 1500 drivers/ntb/test/ntb_tool.c debugfs_create_file("link", 0600, tc->dbgfs_dir, tc 1501 drivers/ntb/test/ntb_tool.c tc, &tool_link_fops); tc 1503 drivers/ntb/test/ntb_tool.c debugfs_create_file("db", 0600, tc->dbgfs_dir, tc 1504 drivers/ntb/test/ntb_tool.c tc, &tool_db_fops); tc 1506 drivers/ntb/test/ntb_tool.c debugfs_create_file("db_valid_mask", 0600, tc->dbgfs_dir, tc 1507 drivers/ntb/test/ntb_tool.c tc, &tool_db_valid_mask_fops); tc 1509 drivers/ntb/test/ntb_tool.c debugfs_create_file("db_mask", 0600, tc->dbgfs_dir, tc 1510 drivers/ntb/test/ntb_tool.c tc, &tool_db_mask_fops); tc 1512 drivers/ntb/test/ntb_tool.c debugfs_create_file("db_event", 0600, tc->dbgfs_dir, tc 1513 drivers/ntb/test/ntb_tool.c tc, &tool_db_event_fops); tc 1515 drivers/ntb/test/ntb_tool.c debugfs_create_file("peer_db", 0600, tc->dbgfs_dir, tc 1516 drivers/ntb/test/ntb_tool.c tc, &tool_peer_db_fops); tc 1518 drivers/ntb/test/ntb_tool.c debugfs_create_file("peer_db_mask", 0600, tc->dbgfs_dir, tc 1519 drivers/ntb/test/ntb_tool.c tc, &tool_peer_db_mask_fops); tc 1521 drivers/ntb/test/ntb_tool.c if (tc->inspad_cnt != 0) { tc 1522 drivers/ntb/test/ntb_tool.c for (sidx = 0; sidx < tc->inspad_cnt; sidx++) { tc 1525 drivers/ntb/test/ntb_tool.c debugfs_create_file(buf, 0600, tc->dbgfs_dir, tc 1526 drivers/ntb/test/ntb_tool.c &tc->inspads[sidx], &tool_spad_fops); tc 1530 drivers/ntb/test/ntb_tool.c if (tc->inmsg_cnt != 0) { tc 1531 drivers/ntb/test/ntb_tool.c for (midx = 0; midx < tc->inmsg_cnt; midx++) { tc 1533 drivers/ntb/test/ntb_tool.c debugfs_create_file(buf, 0600, tc->dbgfs_dir, tc 1534 drivers/ntb/test/ntb_tool.c &tc->inmsgs[midx], &tool_inmsg_fops); tc 1537 drivers/ntb/test/ntb_tool.c debugfs_create_file("msg_sts", 0600, tc->dbgfs_dir, tc 1538 drivers/ntb/test/ntb_tool.c tc, &tool_msg_sts_fops); tc 1540 drivers/ntb/test/ntb_tool.c debugfs_create_file("msg_inbits", 0600, tc->dbgfs_dir, tc 1541 drivers/ntb/test/ntb_tool.c tc, &tool_msg_inbits_fops); tc 1543 drivers/ntb/test/ntb_tool.c debugfs_create_file("msg_outbits", 0600, tc->dbgfs_dir, tc 1544 drivers/ntb/test/ntb_tool.c tc, &tool_msg_outbits_fops); tc 1546 drivers/ntb/test/ntb_tool.c debugfs_create_file("msg_mask", 0600, tc->dbgfs_dir, tc 1547 drivers/ntb/test/ntb_tool.c tc, &tool_msg_mask_fops); tc 1549 drivers/ntb/test/ntb_tool.c debugfs_create_file("msg_event", 0600, tc->dbgfs_dir, tc 1550 drivers/ntb/test/ntb_tool.c tc, &tool_msg_event_fops); tc 1553 drivers/ntb/test/ntb_tool.c for (pidx = 0; pidx < tc->peer_cnt; pidx++) { tc 1555 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir = tc 1556 drivers/ntb/test/ntb_tool.c debugfs_create_dir(buf, tc->dbgfs_dir); tc 1559 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1560 drivers/ntb/test/ntb_tool.c &tc->peers[pidx], &tool_peer_port_fops); tc 1563 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1564 drivers/ntb/test/ntb_tool.c &tc->peers[pidx], &tool_peer_link_fops); tc 1567 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1568 drivers/ntb/test/ntb_tool.c &tc->peers[pidx], &tool_peer_link_event_fops); tc 1570 drivers/ntb/test/ntb_tool.c for (widx = 0; widx 
< tc->peers[pidx].inmw_cnt; widx++) { tc 1573 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1574 drivers/ntb/test/ntb_tool.c &tc->peers[pidx].inmws[widx], tc 1578 drivers/ntb/test/ntb_tool.c for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) { tc 1581 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1582 drivers/ntb/test/ntb_tool.c &tc->peers[pidx].outmws[widx], tc 1586 drivers/ntb/test/ntb_tool.c for (sidx = 0; sidx < tc->peers[pidx].outspad_cnt; sidx++) { tc 1590 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1591 drivers/ntb/test/ntb_tool.c &tc->peers[pidx].outspads[sidx], tc 1595 drivers/ntb/test/ntb_tool.c for (midx = 0; midx < tc->peers[pidx].outmsg_cnt; midx++) { tc 1598 drivers/ntb/test/ntb_tool.c tc->peers[pidx].dbgfs_dir, tc 1599 drivers/ntb/test/ntb_tool.c &tc->peers[pidx].outmsgs[midx], tc 1605 drivers/ntb/test/ntb_tool.c static void tool_clear_dbgfs(struct tool_ctx *tc) tc 1607 drivers/ntb/test/ntb_tool.c debugfs_remove_recursive(tc->dbgfs_dir); tc 1612 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc; tc 1615 drivers/ntb/test/ntb_tool.c tc = tool_create_data(ntb); tc 1616 drivers/ntb/test/ntb_tool.c if (IS_ERR(tc)) tc 1617 drivers/ntb/test/ntb_tool.c return PTR_ERR(tc); tc 1619 drivers/ntb/test/ntb_tool.c ret = tool_init_peers(tc); tc 1623 drivers/ntb/test/ntb_tool.c ret = tool_init_mws(tc); tc 1627 drivers/ntb/test/ntb_tool.c ret = tool_init_spads(tc); tc 1631 drivers/ntb/test/ntb_tool.c ret = tool_init_msgs(tc); tc 1635 drivers/ntb/test/ntb_tool.c ret = tool_init_ntb(tc); tc 1639 drivers/ntb/test/ntb_tool.c tool_setup_dbgfs(tc); tc 1644 drivers/ntb/test/ntb_tool.c tool_clear_mws(tc); tc 1647 drivers/ntb/test/ntb_tool.c tool_clear_data(tc); tc 1654 drivers/ntb/test/ntb_tool.c struct tool_ctx *tc = ntb->ctx; tc 1656 drivers/ntb/test/ntb_tool.c tool_clear_dbgfs(tc); tc 1658 drivers/ntb/test/ntb_tool.c tool_clear_ntb(tc); tc 1660 drivers/ntb/test/ntb_tool.c tool_clear_mws(tc); tc 1662 drivers/ntb/test/ntb_tool.c tool_clear_data(tc); tc 51 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc; tc 77 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc = tcbpwmc->tc; tc 78 drivers/pwm/pwm-atmel-tcb.c void __iomem *regs = tc->regs; tc 88 drivers/pwm/pwm-atmel-tcb.c ret = clk_prepare_enable(tc->clk[group]); tc 134 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc = tcbpwmc->tc; tc 136 drivers/pwm/pwm-atmel-tcb.c clk_disable_unprepare(tc->clk[pwm->hwpwm / 2]); tc 145 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc = tcbpwmc->tc; tc 146 drivers/pwm/pwm-atmel-tcb.c void __iomem *regs = tc->regs; tc 204 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc = tcbpwmc->tc; tc 205 drivers/pwm/pwm-atmel-tcb.c void __iomem *regs = tc->regs; tc 291 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc = tcbpwmc->tc; tc 296 drivers/pwm/pwm-atmel-tcb.c unsigned rate = clk_get_rate(tc->clk[group]); tc 310 drivers/pwm/pwm-atmel-tcb.c max = min << tc->tcb_config->counter_width; tc 321 drivers/pwm/pwm-atmel-tcb.c rate = clk_get_rate(tc->slow_clk); tc 323 drivers/pwm/pwm-atmel-tcb.c max = min << tc->tcb_config->counter_width; tc 383 drivers/pwm/pwm-atmel-tcb.c struct atmel_tc *tc; tc 395 drivers/pwm/pwm-atmel-tcb.c tc = atmel_tc_alloc(tcblock); tc 396 drivers/pwm/pwm-atmel-tcb.c if (tc == NULL) { tc 413 drivers/pwm/pwm-atmel-tcb.c tcbpwm->tc = tc; tc 415 drivers/pwm/pwm-atmel-tcb.c err = clk_prepare_enable(tc->slow_clk); tc 430 drivers/pwm/pwm-atmel-tcb.c clk_disable_unprepare(tcbpwm->tc->slow_clk); tc 433 drivers/pwm/pwm-atmel-tcb.c atmel_tc_free(tc); tc 443 drivers/pwm/pwm-atmel-tcb.c 
clk_disable_unprepare(tcbpwm->tc->slow_clk); tc 449 drivers/pwm/pwm-atmel-tcb.c atmel_tc_free(tcbpwm->tc); tc 464 drivers/pwm/pwm-atmel-tcb.c void __iomem *base = tcbpwm->tc->regs; tc 481 drivers/pwm/pwm-atmel-tcb.c void __iomem *base = tcbpwm->tc->regs; tc 604 drivers/s390/block/dasd_eer.c int tc,rc; tc 626 drivers/s390/block/dasd_eer.c tc = 0; tc 627 drivers/s390/block/dasd_eer.c while (!tc) { tc 628 drivers/s390/block/dasd_eer.c tc = dasd_eer_read_buffer(eerb, (char *) &tailcount, tc 630 drivers/s390/block/dasd_eer.c if (!tc) { tc 646 drivers/s390/block/dasd_eer.c WARN_ON(tc != sizeof(tailcount)); tc 651 drivers/s390/block/dasd_eer.c tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count); tc 652 drivers/s390/block/dasd_eer.c WARN_ON(tc != effective_count); tc 2668 drivers/scsi/initio.c struct target_control *tc; tc 2671 drivers/scsi/initio.c tc = &host->targets[sdev->id]; tc 2673 drivers/scsi/initio.c if (tc->heads) { tc 2674 drivers/scsi/initio.c info_array[0] = tc->heads; tc 2675 drivers/scsi/initio.c info_array[1] = tc->sectors; tc 2676 drivers/scsi/initio.c info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors; tc 2678 drivers/scsi/initio.c if (tc->drv_flags & TCF_DRV_255_63) { tc 2288 drivers/scsi/isci/host.c ireq->tc = &ihost->task_context_table[i]; tc 887 drivers/scsi/isci/port.c struct scu_task_context *tc; tc 890 drivers/scsi/isci/port.c tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc 891 drivers/scsi/isci/port.c tc->abort = 0; tc 911 drivers/scsi/isci/port.c struct scu_task_context *tc; tc 914 drivers/scsi/isci/port.c tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; tc 915 drivers/scsi/isci/port.c tc->abort = 1; tc 78 drivers/scsi/isci/request.c return &ireq->tc->sgl_pair_ab; tc 80 drivers/scsi/isci/request.c return &ireq->tc->sgl_pair_cd; tc 93 drivers/scsi/isci/request.c offset = (void *) &ireq->tc->sgl_pair_ab - tc 97 drivers/scsi/isci/request.c offset = (void *) &ireq->tc->sgl_pair_cd - tc 299 drivers/scsi/isci/request.c struct scu_task_context *tc = ireq->tc; tc 303 drivers/scsi/isci/request.c tc->block_guard_enable = 1; tc 304 drivers/scsi/isci/request.c tc->blk_prot_en = 1; tc 305 drivers/scsi/isci/request.c tc->blk_sz = blk_sz; tc 307 drivers/scsi/isci/request.c tc->blk_prot_func = 0x2; tc 309 drivers/scsi/isci/request.c tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, tc 313 drivers/scsi/isci/request.c tc->interm_crc_val = 0; tc 315 drivers/scsi/isci/request.c tc->init_crc_seed = 0; tc 316 drivers/scsi/isci/request.c tc->app_tag_verify = 0; tc 317 drivers/scsi/isci/request.c tc->app_tag_gen = 0; tc 318 drivers/scsi/isci/request.c tc->ref_tag_seed_verify = 0; tc 321 drivers/scsi/isci/request.c tc->UD_bytes_immed_val = scmd->device->sector_size; tc 323 drivers/scsi/isci/request.c tc->reserved_DC_0 = 0; tc 326 drivers/scsi/isci/request.c tc->DIF_bytes_immed_val = 8; tc 328 drivers/scsi/isci/request.c tc->reserved_DC_1 = 0; tc 329 drivers/scsi/isci/request.c tc->bgc_blk_sz = scmd->device->sector_size; tc 330 drivers/scsi/isci/request.c tc->reserved_E0_0 = 0; tc 331 drivers/scsi/isci/request.c tc->app_tag_gen_mask = 0; tc 334 drivers/scsi/isci/request.c tc->bgctl = 0; tc 337 drivers/scsi/isci/request.c tc->bgctl_f.op = 0x2; tc 339 drivers/scsi/isci/request.c tc->app_tag_verify_mask = 0; tc 342 drivers/scsi/isci/request.c tc->blk_guard_err = 0; tc 344 drivers/scsi/isci/request.c tc->reserved_E8_0 = 0; tc 347 drivers/scsi/isci/request.c tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; tc 349 drivers/scsi/isci/request.c 
tc 2288 drivers/scsi/isci/host.c ireq->tc = &ihost->task_context_table[i];
tc 887 drivers/scsi/isci/port.c struct scu_task_context *tc;
tc 890 drivers/scsi/isci/port.c tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
tc 891 drivers/scsi/isci/port.c tc->abort = 0;
tc 911 drivers/scsi/isci/port.c struct scu_task_context *tc;
tc 914 drivers/scsi/isci/port.c tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
tc 915 drivers/scsi/isci/port.c tc->abort = 1;
tc 78 drivers/scsi/isci/request.c return &ireq->tc->sgl_pair_ab;
tc 80 drivers/scsi/isci/request.c return &ireq->tc->sgl_pair_cd;
tc 93 drivers/scsi/isci/request.c offset = (void *) &ireq->tc->sgl_pair_ab -
tc 97 drivers/scsi/isci/request.c offset = (void *) &ireq->tc->sgl_pair_cd -
tc 299 drivers/scsi/isci/request.c struct scu_task_context *tc = ireq->tc;
tc 303 drivers/scsi/isci/request.c tc->block_guard_enable = 1;
tc 304 drivers/scsi/isci/request.c tc->blk_prot_en = 1;
tc 305 drivers/scsi/isci/request.c tc->blk_sz = blk_sz;
tc 307 drivers/scsi/isci/request.c tc->blk_prot_func = 0x2;
tc 309 drivers/scsi/isci/request.c tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
tc 313 drivers/scsi/isci/request.c tc->interm_crc_val = 0;
tc 315 drivers/scsi/isci/request.c tc->init_crc_seed = 0;
tc 316 drivers/scsi/isci/request.c tc->app_tag_verify = 0;
tc 317 drivers/scsi/isci/request.c tc->app_tag_gen = 0;
tc 318 drivers/scsi/isci/request.c tc->ref_tag_seed_verify = 0;
tc 321 drivers/scsi/isci/request.c tc->UD_bytes_immed_val = scmd->device->sector_size;
tc 323 drivers/scsi/isci/request.c tc->reserved_DC_0 = 0;
tc 326 drivers/scsi/isci/request.c tc->DIF_bytes_immed_val = 8;
tc 328 drivers/scsi/isci/request.c tc->reserved_DC_1 = 0;
tc 329 drivers/scsi/isci/request.c tc->bgc_blk_sz = scmd->device->sector_size;
tc 330 drivers/scsi/isci/request.c tc->reserved_E0_0 = 0;
tc 331 drivers/scsi/isci/request.c tc->app_tag_gen_mask = 0;
tc 334 drivers/scsi/isci/request.c tc->bgctl = 0;
tc 337 drivers/scsi/isci/request.c tc->bgctl_f.op = 0x2;
tc 339 drivers/scsi/isci/request.c tc->app_tag_verify_mask = 0;
tc 342 drivers/scsi/isci/request.c tc->blk_guard_err = 0;
tc 344 drivers/scsi/isci/request.c tc->reserved_E8_0 = 0;
tc 347 drivers/scsi/isci/request.c tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
tc 349 drivers/scsi/isci/request.c tc->ref_tag_seed_gen = 0;
tc 354 drivers/scsi/isci/request.c struct scu_task_context *tc = ireq->tc;
tc 358 drivers/scsi/isci/request.c tc->block_guard_enable = 1;
tc 359 drivers/scsi/isci/request.c tc->blk_prot_en = 1;
tc 360 drivers/scsi/isci/request.c tc->blk_sz = blk_sz;
tc 362 drivers/scsi/isci/request.c tc->blk_prot_func = 0x1;
tc 364 drivers/scsi/isci/request.c tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
tc 368 drivers/scsi/isci/request.c tc->interm_crc_val = 0;
tc 370 drivers/scsi/isci/request.c tc->init_crc_seed = 0;
tc 371 drivers/scsi/isci/request.c tc->app_tag_verify = 0;
tc 372 drivers/scsi/isci/request.c tc->app_tag_gen = 0;
tc 375 drivers/scsi/isci/request.c tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
tc 377 drivers/scsi/isci/request.c tc->ref_tag_seed_verify = 0;
tc 380 drivers/scsi/isci/request.c tc->UD_bytes_immed_val = scmd->device->sector_size;
tc 382 drivers/scsi/isci/request.c tc->reserved_DC_0 = 0;
tc 385 drivers/scsi/isci/request.c tc->DIF_bytes_immed_val = 8;
tc 387 drivers/scsi/isci/request.c tc->reserved_DC_1 = 0;
tc 388 drivers/scsi/isci/request.c tc->bgc_blk_sz = scmd->device->sector_size;
tc 389 drivers/scsi/isci/request.c tc->reserved_E0_0 = 0;
tc 390 drivers/scsi/isci/request.c tc->app_tag_gen_mask = 0;
tc 393 drivers/scsi/isci/request.c tc->bgctl = 0;
tc 396 drivers/scsi/isci/request.c tc->bgctl_f.crc_verify = 1;
tc 397 drivers/scsi/isci/request.c tc->bgctl_f.op = 0x1;
tc 399 drivers/scsi/isci/request.c tc->bgctl_f.ref_tag_chk = 1;
tc 400 drivers/scsi/isci/request.c tc->bgctl_f.app_f_detect = 1;
tc 402 drivers/scsi/isci/request.c tc->bgctl_f.app_ref_f_detect = 1;
tc 404 drivers/scsi/isci/request.c tc->app_tag_verify_mask = 0;
tc 407 drivers/scsi/isci/request.c tc->blk_guard_err = 0;
tc 409 drivers/scsi/isci/request.c tc->reserved_E8_0 = 0;
tc 410 drivers/scsi/isci/request.c tc->ref_tag_seed_gen = 0;
tc 422 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 473 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 563 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 613 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 811 drivers/scsi/isci/request.c struct scu_task_context *tc = ireq->tc;
tc 822 drivers/scsi/isci/request.c tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
tc 824 drivers/scsi/isci/request.c switch (tc->protocol_type) {
tc 828 drivers/scsi/isci/request.c tc->type.ssp.tag = ireq->io_tag;
tc 829 drivers/scsi/isci/request.c tc->type.ssp.target_port_transfer_tag = 0xFFFF;
tc 1327 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 1653 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 1670 drivers/scsi/isci/request.c struct scu_task_context *task_context = ireq->tc;
tc 3018 drivers/scsi/isci/request.c ireq->tc->abort = 1;
tc 3109 drivers/scsi/isci/request.c memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
tc 3126 drivers/scsi/isci/request.c memset(ireq->tc, 0, sizeof(struct scu_task_context));
tc 3177 drivers/scsi/isci/request.c ireq->tc->type.stp.ncq_tag = qc->tag;
tc 3232 drivers/scsi/isci/request.c task_context = ireq->tc;
tc 121 drivers/scsi/isci/request.h struct scu_task_context *tc;
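In both isci paths above, the protection reference tag is seeded with the low 32 bits of the starting LBA (scsi_get_lba(scmd) & 0xffffffff) for generation (op 0x2) or verification (op 0x1). For T10 Type 1 protection the tag is then expected to advance by one per protected block; a hedged userspace sketch of that expectation (the helper name is hypothetical, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration: the expected T10 DIF reference tag for
 * block i of a transfer starting at 'lba' is the low 32 bits of the
 * LBA plus the block index, wrapping naturally at 2^32. */
static uint32_t expected_ref_tag(uint64_t lba, uint32_t block_index)
{
	return (uint32_t)(lba & 0xffffffff) + block_index;
}

int main(void)
{
	uint64_t lba = 0x1ffffffffULL;

	for (uint32_t i = 0; i < 3; i++)
		printf("block %u -> ref tag 0x%08x\n", i,
		       expected_ref_tag(lba, i));
	return 0;
}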
tc 3102 drivers/scsi/qla2xxx/qla_init.c void *tc;
tc 3120 drivers/scsi/qla2xxx/qla_init.c tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
tc 3122 drivers/scsi/qla2xxx/qla_init.c if (!tc) {
tc 3134 drivers/scsi/qla2xxx/qla_init.c dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
tc 3143 drivers/scsi/qla2xxx/qla_init.c ha->fce = tc;
tc 3151 drivers/scsi/qla2xxx/qla_init.c void *tc;
tc 3165 drivers/scsi/qla2xxx/qla_init.c tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
tc 3167 drivers/scsi/qla2xxx/qla_init.c if (!tc) {
tc 3178 drivers/scsi/qla2xxx/qla_init.c dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
tc 3186 drivers/scsi/qla2xxx/qla_init.c ha->eft = tc;
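The qla_init.c entries show the usual coherent-DMA buffer lifecycle: dma_alloc_coherent() returns a CPU pointer and fills in the bus address, and any exit path that does not keep the buffer must call dma_free_coherent() with the same size and handle. A kernel-context sketch of the pattern (function and parameter names here are illustrative, not the qla2xxx code):

#include <linux/dma-mapping.h>

/* Illustrative only: allocate a coherent trace buffer, free it on any
 * failure path with the same (size, cpu pointer, dma handle) triple. */
static int alloc_trace_buf(struct device *dev, size_t size,
			   void **buf, dma_addr_t *dma)
{
	*buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	/* ... program *dma into the hardware; if that fails:
	 * dma_free_coherent(dev, size, *buf, *dma); */
	return 0;
}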
tc 888 drivers/scsi/qla2xxx/qla_iocb.c struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
tc 914 drivers/scsi/qla2xxx/qla_iocb.c } else if (tc) {
tc 915 drivers/scsi/qla2xxx/qla_iocb.c prot_int = tc->blk_sz;
tc 916 drivers/scsi/qla2xxx/qla_iocb.c sgx.tot_bytes = tc->bufflen;
tc 917 drivers/scsi/qla2xxx/qla_iocb.c sgx.cur_sg = tc->sg;
tc 918 drivers/scsi/qla2xxx/qla_iocb.c sg_prot = tc->prot_sg;
tc 962 drivers/scsi/qla2xxx/qla_iocb.c &(tc->ctx->dsd_list));
tc 963 drivers/scsi/qla2xxx/qla_iocb.c *tc->ctx_dsd_alloced = 1;
tc 1002 drivers/scsi/qla2xxx/qla_iocb.c struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
tc 1017 drivers/scsi/qla2xxx/qla_iocb.c } else if (tc) {
tc 1018 drivers/scsi/qla2xxx/qla_iocb.c sgl = tc->sg;
tc 1059 drivers/scsi/qla2xxx/qla_iocb.c &(tc->ctx->dsd_list));
tc 1060 drivers/scsi/qla2xxx/qla_iocb.c *tc->ctx_dsd_alloced = 1;
tc 1082 drivers/scsi/qla2xxx/qla_iocb.c struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
tc 1105 drivers/scsi/qla2xxx/qla_iocb.c } else if (tc) {
tc 1106 drivers/scsi/qla2xxx/qla_iocb.c vha = tc->vha;
tc 1107 drivers/scsi/qla2xxx/qla_iocb.c sgl = tc->prot_sg;
tc 1108 drivers/scsi/qla2xxx/qla_iocb.c difctx = tc->ctx;
tc 1109 drivers/scsi/qla2xxx/qla_iocb.c direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
tc 1242 drivers/scsi/qla2xxx/qla_iocb.c tc->prot_flags = DIF_BUNDL_DMA_VALID;
tc 1299 drivers/scsi/qla2xxx/qla_iocb.c tc->ctx_dsd_alloced = 1;
tc 1357 drivers/scsi/qla2xxx/qla_iocb.c tc->ctx_dsd_alloced = 1;
tc 3000 drivers/scsi/qla2xxx/qla_target.c struct qla_tc_param tc;
tc 3153 drivers/scsi/qla2xxx/qla_target.c memset((uint8_t *)&tc, 0 , sizeof(tc));
tc 3154 drivers/scsi/qla2xxx/qla_target.c tc.vha = vha;
tc 3155 drivers/scsi/qla2xxx/qla_target.c tc.blk_sz = cmd->blk_sz;
tc 3156 drivers/scsi/qla2xxx/qla_target.c tc.bufflen = cmd->bufflen;
tc 3157 drivers/scsi/qla2xxx/qla_target.c tc.sg = cmd->sg;
tc 3158 drivers/scsi/qla2xxx/qla_target.c tc.prot_sg = cmd->prot_sg;
tc 3159 drivers/scsi/qla2xxx/qla_target.c tc.ctx = crc_ctx_pkt;
tc 3160 drivers/scsi/qla2xxx/qla_target.c tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
tc 3167 drivers/scsi/qla2xxx/qla_target.c prm->tot_dsds, &tc))
tc 3170 drivers/scsi/qla2xxx/qla_target.c (prm->tot_dsds - prm->prot_seg_cnt), &tc))
tc 47 drivers/scsi/raid_class.c struct transport_container *tc = \
tc 49 drivers/scsi/raid_class.c tc_to_raid_internal(tc); \
tc 77 drivers/scsi/raid_class.c static int raid_setup(struct transport_container *tc, struct device *dev,
tc 94 drivers/scsi/raid_class.c static int raid_remove(struct transport_container *tc, struct device *dev,
tc 345 drivers/scsi/scsi_transport_fc.c static int fc_target_setup(struct transport_container *tc, struct device *dev,
tc 375 drivers/scsi/scsi_transport_fc.c static int fc_host_setup(struct transport_container *tc, struct device *dev,
tc 456 drivers/scsi/scsi_transport_fc.c static int fc_host_remove(struct transport_container *tc, struct device *dev,
tc 1564 drivers/scsi/scsi_transport_iscsi.c static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
tc 1580 drivers/scsi/scsi_transport_iscsi.c static int iscsi_remove_host(struct transport_container *tc,
tc 223 drivers/scsi/scsi_transport_sas.c static int sas_host_setup(struct transport_container *tc, struct device *dev,
tc 242 drivers/scsi/scsi_transport_sas.c static int sas_host_remove(struct transport_container *tc, struct device *dev,
tc 620 drivers/scsi/scsi_transport_sas.c static int sas_phy_setup(struct transport_container *tc, struct device *dev,
tc 166 drivers/scsi/scsi_transport_spi.c static int spi_host_setup(struct transport_container *tc, struct device *dev,
tc 176 drivers/scsi/scsi_transport_spi.c static int spi_host_configure(struct transport_container *tc,
tc 202 drivers/scsi/scsi_transport_spi.c static int spi_target_configure(struct transport_container *tc,
tc 206 drivers/scsi/scsi_transport_spi.c static int spi_device_configure(struct transport_container *tc,
tc 235 drivers/scsi/scsi_transport_spi.c static int spi_setup_transport_attrs(struct transport_container *tc,
tc 1434 drivers/scsi/scsi_transport_spi.c static int spi_host_configure(struct transport_container *tc,
tc 1545 drivers/scsi/scsi_transport_spi.c static int spi_target_configure(struct transport_container *tc,
tc 104 drivers/scsi/scsi_transport_srp.c static int srp_host_setup(struct transport_container *tc, struct device *dev,
tc 26 drivers/staging/fbtft/fb_pcd8544.c static unsigned int tc;
tc 27 drivers/staging/fbtft/fb_pcd8544.c module_param(tc, uint, 0000);
tc 28 drivers/staging/fbtft/fb_pcd8544.c MODULE_PARM_DESC(tc, "TC[1:0] Temperature coefficient: 0-3 (default: 0)");
tc 53 drivers/staging/fbtft/fb_pcd8544.c write_reg(par, 0x04 | (tc & 0x3));
tc 613 drivers/tty/vt/vt.c int tc = conv_uni_to_pc(vc, uc);
tc 614 drivers/tty/vt/vt.c if (tc == -4)
tc 615 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, 0xfffd);
tc 616 drivers/tty/vt/vt.c if (tc == -4)
tc 617 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, '?');
tc 618 drivers/tty/vt/vt.c if (tc != glyph)
tc 621 drivers/tty/vt/vt.c __func__, x, y, glyph, tc);
tc 2584 drivers/tty/vt/vt.c int c, next_c, tc, ok, n = 0, draw_x = -1;
tc 2637 drivers/tty/vt/vt.c tc = c;
tc 2708 drivers/tty/vt/vt.c tc = c;
tc 2710 drivers/tty/vt/vt.c tc = vc_translate(vc, c);
tc 2713 drivers/tty/vt/vt.c param.c = tc;
tc 2728 drivers/tty/vt/vt.c ok = tc && (c >= 32 ||
tc 2740 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, tc);
tc 2741 drivers/tty/vt/vt.c if (tc & ~charmask) {
tc 2742 drivers/tty/vt/vt.c if (tc == -1 || tc == -2) {
tc 2752 drivers/tty/vt/vt.c tc = c;
tc 2755 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, 0xfffd);
tc 2756 drivers/tty/vt/vt.c if (tc < 0) {
tc 2758 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, '?');
tc 2759 drivers/tty/vt/vt.c if (tc < 0) tc = '?';
tc 2791 drivers/tty/vt/vt.c ((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
tc 2792 drivers/tty/vt/vt.c (vc_attr << 8) + tc,
tc 2808 drivers/tty/vt/vt.c tc = conv_uni_to_pc(vc, ' '); /* A space is printed in the second column */
tc 2809 drivers/tty/vt/vt.c if (tc < 0) tc = ' ';
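Both vt.c call sites implement the same glyph fallback ladder: try the character itself, then U+FFFD REPLACEMENT CHARACTER, then a literal '?'. A small userspace model of that ladder (lookup() stands in for conv_uni_to_pc(), which returns a negative value when the font has no glyph):

#include <stdio.h>

/* Userspace model of vt.c's fallback: character -> U+FFFD -> '?'. */
static int lookup(unsigned int uc)
{
	return uc < 128 ? (int)uc : -4; /* toy font: ASCII only */
}

static int to_glyph(unsigned int uc)
{
	int tc = lookup(uc);

	if (tc < 0)
		tc = lookup(0xfffd); /* REPLACEMENT CHARACTER */
	if (tc < 0)
		tc = lookup('?');    /* last resort, as in vt.c */
	return tc < 0 ? '?' : tc;
}

int main(void)
{
	printf("U+0041 -> %d, U+4E2D -> %d\n",
	       to_glyph(0x41), to_glyph(0x4e2d));
	return 0;
}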
tc 273 drivers/video/fbdev/pmag-aa-fb.c MODULE_DEVICE_TABLE(tc, pmagaafb_tc_table);
tc 263 drivers/video/fbdev/pmag-ba-fb.c MODULE_DEVICE_TABLE(tc, pmagbafb_tc_table);
tc 381 drivers/video/fbdev/pmagb-b-fb.c MODULE_DEVICE_TABLE(tc, pmagbbfb_tc_table);
tc 130 drivers/video/fbdev/tgafb.c MODULE_DEVICE_TABLE(tc, tgafb_tc_table);
tc 43 drivers/xen/efi.c static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
tc 55 drivers/xen/efi.c if (tc) {
tc 56 drivers/xen/efi.c tc->resolution = efi_data(op).u.get_time.resolution;
tc 57 drivers/xen/efi.c tc->accuracy = efi_data(op).u.get_time.accuracy;
tc 58 drivers/xen/efi.c tc->sets_to_zero = !!(efi_data(op).misc &
tc 401 fs/ntfs/lcnalloc.c LCN tc;
tc 407 fs/ntfs/lcnalloc.c tc = lcn + bmp_pos + 1;
tc 411 fs/ntfs/lcnalloc.c (unsigned long long)tc,
tc 420 fs/ntfs/lcnalloc.c if (tc >= vol->mft_zone_end) {
tc 427 fs/ntfs/lcnalloc.c tc > vol->mft_zone_pos)
tc 428 fs/ntfs/lcnalloc.c && tc >= vol->mft_lcn)
tc 429 fs/ntfs/lcnalloc.c vol->mft_zone_pos = tc;
tc 442 fs/ntfs/lcnalloc.c if (tc >= vol->nr_clusters)
tc 447 fs/ntfs/lcnalloc.c tc > vol->data1_zone_pos)
tc 448 fs/ntfs/lcnalloc.c && tc >= vol->mft_zone_end)
tc 449 fs/ntfs/lcnalloc.c vol->data1_zone_pos = tc;
tc 462 fs/ntfs/lcnalloc.c if (tc >= vol->mft_zone_start)
tc 466 fs/ntfs/lcnalloc.c tc > vol->data2_zone_pos)
tc 467 fs/ntfs/lcnalloc.c vol->data2_zone_pos = tc;
tc 544 fs/ntfs/lcnalloc.c LCN tc;
tc 551 fs/ntfs/lcnalloc.c tc = rl[rlpos - 1].lcn +
tc 553 fs/ntfs/lcnalloc.c if (tc >= vol->mft_zone_end) {
tc 560 fs/ntfs/lcnalloc.c tc > vol->mft_zone_pos)
tc 561 fs/ntfs/lcnalloc.c && tc >= vol->mft_lcn)
tc 562 fs/ntfs/lcnalloc.c vol->mft_zone_pos = tc;
tc 587 fs/ntfs/lcnalloc.c LCN tc;
tc 594 fs/ntfs/lcnalloc.c tc = rl[rlpos - 1].lcn +
tc 596 fs/ntfs/lcnalloc.c if (tc >= vol->nr_clusters)
tc 601 fs/ntfs/lcnalloc.c tc > vol->data1_zone_pos)
tc 602 fs/ntfs/lcnalloc.c && tc >= vol->mft_zone_end)
tc 603 fs/ntfs/lcnalloc.c vol->data1_zone_pos = tc;
tc 628 fs/ntfs/lcnalloc.c LCN tc;
tc 635 fs/ntfs/lcnalloc.c tc = rl[rlpos - 1].lcn +
tc 637 fs/ntfs/lcnalloc.c if (tc >= vol->mft_zone_start)
tc 641 fs/ntfs/lcnalloc.c tc > vol->data2_zone_pos)
tc 642 fs/ntfs/lcnalloc.c vol->data2_zone_pos = tc;
tc 352 fs/ntfs/unistr.c unsigned char *tc;
tc 354 fs/ntfs/unistr.c tc = kmalloc((ns_len + 64) &
tc 356 fs/ntfs/unistr.c if (tc) {
tc 357 fs/ntfs/unistr.c memcpy(tc, ns, ns_len);
tc 360 fs/ntfs/unistr.c ns = tc;
tc 2435 fs/unicode/mkutf8data.c unsigned int tc = 28;
tc 2436 fs/unicode/mkutf8data.c unsigned int nc = (vc * tc);
tc 2451 fs/unicode/mkutf8data.c unsigned int vi = (si % nc) / tc;
tc 2452 fs/unicode/mkutf8data.c unsigned int ti = si % tc;
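The mkutf8data.c constants are the standard Unicode Hangul decomposition parameters: tc = TCount = 28 trailing consonants, nc = VCount * TCount = 588, and a syllable index si splits into leading, vowel, and trailing indices as li = si / nc, vi = (si % nc) / tc, ti = si % tc. A runnable check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int sbase = 0xac00, lbase = 0x1100, vbase = 0x1161,
			   tbase = 0x11a7, vc = 21, tc = 28;
	const unsigned int nc = vc * tc;  /* 588 */
	unsigned int cp = 0xd55c;         /* HAN, as in "Hangul" */
	unsigned int si = cp - sbase;
	unsigned int li = si / nc;
	unsigned int vi = (si % nc) / tc;
	unsigned int ti = si % tc;

	/* Prints L U+1112, V U+1161, T U+11AB for U+D55C. */
	printf("U+%04X -> L U+%04X V U+%04X", cp, lbase + li, vbase + vi);
	if (ti)
		printf(" T U+%04X", tbase + ti);
	printf("\n");
	return 0;
}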
tc 573 include/linux/efi.h typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
tc 1015 include/linux/greybus/greybus_protocols.h __u8 tc;
tc 655 include/linux/mlx5/driver.h struct timecounter tc;
tc 170 include/linux/mlx5/port.h u8 prio, u8 *tc);
tc 173 include/linux/mlx5/port.h u8 tc, u8 *tc_group);
tc 176 include/linux/mlx5/port.h u8 tc, u8 *bw_pct);
tc 2088 include/linux/netdevice.h int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
tc 2090 include/linux/netdevice.h if (tc >= dev->num_tc)
tc 2093 include/linux/netdevice.h dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
tc 2099 include/linux/netdevice.h int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
tc 2112 include/linux/netdevice.h u8 tc, u16 count, u16 offset);
tc 57 include/linux/qed/qed_eth_if.h u8 tc;
tc 1351 include/linux/qed/qed_if.h #define TX_PI(tc) (RX_PI + 1 + tc)
tc 84 include/linux/timecounter.h static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
tc 86 include/linux/timecounter.h tc->nsec += delta;
tc 99 include/linux/timecounter.h extern void timecounter_init(struct timecounter *tc,
tc 111 include/linux/timecounter.h extern u64 timecounter_read(struct timecounter *tc);
tc 127 include/linux/timecounter.h extern u64 timecounter_cyc2time(struct timecounter *tc,
tc 84 include/linux/transport_class.h static inline int transport_container_register(struct transport_container *tc)
tc 86 include/linux/transport_class.h return attribute_container_register(&tc->ac);
tc 89 include/linux/transport_class.h static inline void transport_container_unregister(struct transport_container *tc)
tc 91 include/linux/transport_class.h if (unlikely(attribute_container_unregister(&tc->ac)))
tc 84 include/net/9p/client.h struct p9_fcall tc;
tc 203 include/net/flow_offload.h u8 tc;
tc 212 include/net/flow_offload.h u8 tc;
tc 48 include/scsi/scsi_transport.h #define transport_class_to_shost(tc) \
tc 49 include/scsi/scsi_transport.h dev_to_shost((tc)->parent)
tc 76 include/soc/at91/atmel_tcb.h extern void atmel_tc_free(struct atmel_tc *tc);
tc 531 include/sound/hdaudio.h struct timecounter tc;
tc 8 kernel/time/timecounter.c void timecounter_init(struct timecounter *tc,
tc 12 kernel/time/timecounter.c tc->cc = cc;
tc 13 kernel/time/timecounter.c tc->cycle_last = cc->read(cc);
tc 14 kernel/time/timecounter.c tc->nsec = start_tstamp;
tc 15 kernel/time/timecounter.c tc->mask = (1ULL << cc->shift) - 1;
tc 16 kernel/time/timecounter.c tc->frac = 0;
tc 31 kernel/time/timecounter.c static u64 timecounter_read_delta(struct timecounter *tc)
tc 37 kernel/time/timecounter.c cycle_now = tc->cc->read(tc->cc);
tc 40 kernel/time/timecounter.c cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
tc 43 kernel/time/timecounter.c ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta,
tc 44 kernel/time/timecounter.c tc->mask, &tc->frac);
tc 47 kernel/time/timecounter.c tc->cycle_last = cycle_now;
tc 52 kernel/time/timecounter.c u64 timecounter_read(struct timecounter *tc)
tc 57 kernel/time/timecounter.c nsec = timecounter_read_delta(tc);
tc 58 kernel/time/timecounter.c nsec += tc->nsec;
tc 59 kernel/time/timecounter.c tc->nsec = nsec;
tc 79 kernel/time/timecounter.c u64 timecounter_cyc2time(struct timecounter *tc,
tc 82 kernel/time/timecounter.c u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
tc 83 kernel/time/timecounter.c u64 nsec = tc->nsec, frac = tc->frac;
tc 90 kernel/time/timecounter.c if (delta > tc->cc->mask / 2) {
tc 91 kernel/time/timecounter.c delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
tc 92 kernel/time/timecounter.c nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
tc 94 kernel/time/timecounter.c nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
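The timecounter.c entries above are the whole algorithm: timecounter_init() latches the current counter value, timecounter_read() accumulates masked counter deltas into nanoseconds through the cyclecounter's mult/shift, and timecounter_cyc2time() converts a captured counter value, going backwards when the value predates cycle_last. A kernel-context usage sketch against a hypothetical free-running register (the register, offset, and mult/shift values are illustrative):

#include <linux/io.h>
#include <linux/timecounter.h>

static void __iomem *my_regs; /* hypothetical device registers */

static u64 my_cc_read(const struct cyclecounter *cc)
{
	return readl(my_regs + 0x10); /* illustrative 32-bit counter */
}

static const struct cyclecounter my_cc = {
	.read	= my_cc_read,
	.mask	= 0xffffffff,		/* 32-bit free-running counter */
	.mult	= 671088640,		/* 10 ns/cycle << 26, i.e. 100 MHz */
	.shift	= 26,
};

static struct timecounter my_tc;

/* At probe: timecounter_init(&my_tc, &my_cc, ktime_get_ns());
 * later, read often enough that the counter cannot wrap twice:
 * u64 ns = timecounter_read(&my_tc); */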
tc 1181 kernel/trace/blktrace.c int tc = t->action >> BLK_TC_SHIFT;
tc 1188 kernel/trace/blktrace.c if (tc & BLK_TC_FLUSH)
tc 1191 kernel/trace/blktrace.c if (tc & BLK_TC_DISCARD)
tc 1193 kernel/trace/blktrace.c else if (tc & BLK_TC_WRITE)
tc 1200 kernel/trace/blktrace.c if (tc & BLK_TC_FUA)
tc 1202 kernel/trace/blktrace.c if (tc & BLK_TC_AHEAD)
tc 1204 kernel/trace/blktrace.c if (tc & BLK_TC_SYNC)
tc 1206 kernel/trace/blktrace.c if (tc & BLK_TC_META)
tc 126 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
tc 127 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));
tc 129 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
tc 130 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));
tc 132 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
tc 133 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
tc 134 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));
tc 136 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
tc 137 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
tc 138 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
tc 139 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));
tc 141 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
tc 142 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
tc 143 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
tc 144 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
tc 145 lib/test_strscpy.c KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
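The test_strscpy cases read as tuples of (src, buffer size, expected return, ...): strscpy() copies at most count-1 bytes, always NUL-terminates a non-zero-sized buffer, and returns the resulting string length or -E2BIG when the source did not fit (including the count == 0 case). A userspace model consistent with those expectations (a sketch of the semantics, not the kernel implementation):

#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Model of strscpy() return semantics: length copied, or -E2BIG. */
static ssize_t strscpy_model(char *dst, const char *src, size_t count)
{
	size_t len = strlen(src);

	if (count == 0)
		return -E2BIG;
	if (len >= count) {
		memcpy(dst, src, count - 1); /* truncate... */
		dst[count - 1] = '\0';       /* ...but still terminate */
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (ssize_t)len;
}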
tc 1020 net/6lowpan/iphc.c u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;
tc 1023 net/6lowpan/iphc.c pr_debug("tc 0x%02x\n", tc);
tc 1026 net/6lowpan/iphc.c if (!tc) {
tc 1037 net/6lowpan/iphc.c lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));
tc 1042 net/6lowpan/iphc.c if (!(tc & 0x3f)) {
tc 1055 net/6lowpan/iphc.c tf[0] |= (tc & 0xc0);
tc 1068 net/6lowpan/iphc.c memcpy(&tf[0], &tc, sizeof(tc));
tc 277 net/9p/client.c if (p9_fcall_init(c, &req->tc, alloc_msize))
tc 282 net/9p/client.c p9pdu_reset(&req->tc);
tc 296 net/9p/client.c req->tc.tag = tag;
tc 317 net/9p/client.c p9_fcall_fini(&req->tc);
tc 347 net/9p/client.c if (req->tc.tag != tag) {
tc 368 net/9p/client.c u16 tag = r->tc.tag;
tc 380 net/9p/client.c p9_fcall_fini(&r->tc);
tc 408 net/9p/client.c req->tc.tag);
tc 421 net/9p/client.c p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc.tag);
tc 431 net/9p/client.c p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
tc 659 net/9p/client.c err = p9_parse_header(&oldreq->tc, NULL, NULL, &oldtag, 1);
tc 704 net/9p/client.c p9pdu_prepare(&req->tc, req->tc.tag, type);
tc 705 net/9p/client.c err = p9pdu_vwritef(&req->tc, c->proto_version, fmt, ap);
tc 708 net/9p/client.c p9pdu_finalize(c, &req->tc);
tc 709 net/9p/client.c trace_9p_client_req(c, type, req->tc.tag);
tc 464 net/9p/trans_fd.c m->wbuf = req->tc.sdata;
tc 465 net/9p/trans_fd.c m->wsize = req->tc.size;
tc 663 net/9p/trans_fd.c m, current, &req->tc, req->tc.id);
tc 349 net/9p/trans_rdma.c c->busa, c->req->tc.size,
tc 480 net/9p/trans_rdma.c c->req->tc.sdata, c->req->tc.size,
tc 490 net/9p/trans_rdma.c sge.length = c->req->tc.size;
tc 268 net/9p/trans_virtio.c VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
tc 414 net/9p/trans_virtio.c memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
tc 421 net/9p/trans_virtio.c sz = cpu_to_le32(req->tc.size + outlen);
tc 422 net/9p/trans_virtio.c memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
tc 433 net/9p/trans_virtio.c memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
tc 445 net/9p/trans_virtio.c VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
tc 144 net/9p/trans_xen.c u32 size = p9_req->tc.size;
tc 157 net/9p/trans_xen.c num = p9_req->tc.tag % priv->num_rings;
tc 179 net/9p/trans_xen.c xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
tc 2019 net/core/dev.c struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
tc 2022 net/core/dev.c if (tc->offset + tc->count > txq) {
tc 2032 net/core/dev.c tc = &dev->tc_to_txq[q];
tc 2033 net/core/dev.c if (tc->offset + tc->count > txq) {
tc 2044 net/core/dev.c struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
tc 2048 net/core/dev.c for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
tc 2049 net/core/dev.c if ((txq - tc->offset) < tc->count)
tc 2246 net/core/dev.c int maps_sz, num_tc = 1, tc = 0;
tc 2260 net/core/dev.c tc = netdev_txq_to_tc(dev, index);
tc 2261 net/core/dev.c if (tc < 0)
tc 2293 net/core/dev.c tci = j * num_tc + tc;
tc 2317 net/core/dev.c for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
tc 2326 net/core/dev.c tci = j * num_tc + tc;
tc 2354 net/core/dev.c for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
tc 2400 net/core/dev.c for (i = tc, tci = j * num_tc; i--; tci++)
tc 2405 net/core/dev.c for (i = num_tc - tc, tci++; --i; tci++)
tc 2477 net/core/dev.c int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
tc 2479 net/core/dev.c if (tc >= dev->num_tc)
tc 2485 net/core/dev.c dev->tc_to_txq[tc].count = count;
tc 2486 net/core/dev.c dev->tc_to_txq[tc].offset = offset;
tc 2526 net/core/dev.c u8 tc, u16 count, u16 offset)
tc 2529 net/core/dev.c if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
tc 2537 net/core/dev.c sb_dev->tc_to_txq[tc].count = count;
tc 2538 net/core/dev.c sb_dev->tc_to_txq[tc].offset = offset;
tc 2790 net/core/dev.c u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
tc 2792 net/core/dev.c qoffset = sb_dev->tc_to_txq[tc].offset;
tc 2793 net/core/dev.c qcount = sb_dev->tc_to_txq[tc].count;
tc 1058 net/core/net-sysfs.c int tc;
tc 1068 net/core/net-sysfs.c tc = netdev_txq_to_tc(dev, index);
tc 1069 net/core/net-sysfs.c if (tc < 0)
tc 1079 net/core/net-sysfs.c return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
tc 1080 net/core/net-sysfs.c sprintf(buf, "%u\n", tc);
tc 1239 net/core/net-sysfs.c int cpu, len, num_tc = 1, tc = 0;
tc 1258 net/core/net-sysfs.c tc = netdev_txq_to_tc(dev, index);
tc 1259 net/core/net-sysfs.c if (tc < 0)
tc 1270 net/core/net-sysfs.c int i, tci = cpu * num_tc + tc;
tc 1332 net/core/net-sysfs.c int j, len, num_tc = 1, tc = 0;
tc 1338 net/core/net-sysfs.c tc = netdev_txq_to_tc(dev, index);
tc 1339 net/core/net-sysfs.c if (tc < 0)
tc 1353 net/core/net-sysfs.c int i, tci = j * num_tc + tc;
tc 980 net/ipv6/datagram.c int tc;
tc 986 net/ipv6/datagram.c tc = *(int *)CMSG_DATA(cmsg);
tc 987 net/ipv6/datagram.c if (tc < -1 || tc > 0xff)
tc 991 net/ipv6/datagram.c ipc6->tclass = tc;
tc 1744 net/mpls/af_mpls.c if (dec.tc) {
tc 14 net/mpls/internal.h u8 tc;
tc 175 net/mpls/internal.h static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
tc 180 net/mpls/internal.h (tc << MPLS_LS_TC_SHIFT) |
tc 193 net/mpls/internal.h result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
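mpls_entry_encode() and the decode at internal.h:193 pack a 32-bit MPLS label stack entry: label in bits 31..12, the 3-bit traffic class in bits 11..9, bottom-of-stack in bit 8, and TTL in bits 7..0 (the kernel additionally byte-swaps to network order on the wire). A runnable round-trip of that bit layout (the mask/shift constants are restated here to keep the example self-contained):

#include <stdint.h>
#include <stdio.h>

#define LS_LABEL_SHIFT	12
#define LS_TC_MASK	0x00000e00u
#define LS_TC_SHIFT	9
#define LS_S_MASK	0x00000100u
#define LS_TTL_MASK	0x000000ffu

int main(void)
{
	/* label 100, tc 5, bottom-of-stack set, ttl 64 */
	uint32_t entry = (100u << LS_LABEL_SHIFT) | (5u << LS_TC_SHIFT) |
			 LS_S_MASK | 64u;

	printf("label=%u tc=%u bos=%u ttl=%u\n",
	       entry >> LS_LABEL_SHIFT,
	       (entry & LS_TC_MASK) >> LS_TC_SHIFT,
	       !!(entry & LS_S_MASK),
	       entry & LS_TTL_MASK);
	return 0;
}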
tc 102 net/rds/tcp.c u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
tc 105 net/rds/tcp.c return tcp_sk(tc->t_sock->sk)->write_seq;
tc 108 net/rds/tcp.c u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
tc 110 net/rds/tcp.c return tcp_sk(tc->t_sock->sk)->snd_una;
tc 114 net/rds/tcp.c struct rds_tcp_connection *tc)
tc 116 net/rds/tcp.c rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
tc 121 net/rds/tcp.c list_del_init(&tc->t_list_item);
tc 125 net/rds/tcp.c if (!tc->t_cpath->cp_conn->c_isv6)
tc 129 net/rds/tcp.c tc->t_sock = NULL;
tc 131 net/rds/tcp.c sock->sk->sk_write_space = tc->t_orig_write_space;
tc 132 net/rds/tcp.c sock->sk->sk_data_ready = tc->t_orig_data_ready;
tc 133 net/rds/tcp.c sock->sk->sk_state_change = tc->t_orig_state_change;
tc 151 net/rds/tcp.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 152 net/rds/tcp.c struct socket *osock = tc->t_sock;
tc 183 net/rds/tcp.c if (tc->t_tinc) {
tc 184 net/rds/tcp.c rds_inc_put(&tc->t_tinc->ti_inc);
tc 185 net/rds/tcp.c tc->t_tinc = NULL;
tc 187 net/rds/tcp.c tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc 188 net/rds/tcp.c tc->t_tinc_data_rem = 0;
tc 189 net/rds/tcp.c rds_tcp_restore_callbacks(osock, tc);
tc 205 net/rds/tcp.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 207 net/rds/tcp.c rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
tc 212 net/rds/tcp.c list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
tc 216 net/rds/tcp.c if (!tc->t_cpath->cp_conn->c_isv6)
tc 224 net/rds/tcp.c tc->t_sock = sock;
tc 225 net/rds/tcp.c tc->t_cpath = cp;
tc 226 net/rds/tcp.c tc->t_orig_data_ready = sock->sk->sk_data_ready;
tc 227 net/rds/tcp.c tc->t_orig_write_space = sock->sk->sk_write_space;
tc 228 net/rds/tcp.c tc->t_orig_state_change = sock->sk->sk_state_change;
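The rds/tcp.c set/restore pair is the standard trick for intercepting socket events: remember the original sk_data_ready/sk_write_space/sk_state_change pointers in the transport's private struct, install replacements, and put the originals back verbatim on teardown so the replacement can chain to the stock TCP behaviour. A condensed kernel-context sketch of the pattern (struct and handler names are illustrative, and the sk_callback_lock locking shown here is one common choice, not necessarily what rds_tcp does):

#include <net/sock.h>

struct my_conn {
	struct socket	*sock;
	void		(*orig_data_ready)(struct sock *sk);
};

static void my_data_ready(struct sock *sk)
{
	struct my_conn *c = sk->sk_user_data;

	/* ... consume data, then fall through to the saved handler */
	c->orig_data_ready(sk);
}

static void my_set_callbacks(struct socket *sock, struct my_conn *c)
{
	write_lock_bh(&sock->sk->sk_callback_lock);
	c->sock = sock;
	c->orig_data_ready = sock->sk->sk_data_ready;
	sock->sk->sk_user_data = c;
	sock->sk->sk_data_ready = my_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void my_restore_callbacks(struct socket *sock, struct my_conn *c)
{
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = c->orig_data_ready;
	c->sock = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}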
tc 246 net/rds/tcp.c struct rds_tcp_connection *tc;
tc 254 net/rds/tcp.c list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
tc 255 net/rds/tcp.c struct inet_sock *inet = inet_sk(tc->t_sock->sk);
tc 257 net/rds/tcp.c if (tc->t_cpath->cp_conn->c_isv6)
tc 265 net/rds/tcp.c tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
tc 266 net/rds/tcp.c tsinfo.data_rem = tc->t_tinc_data_rem;
tc 267 net/rds/tcp.c tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
tc 268 net/rds/tcp.c tsinfo.last_expected_una = tc->t_last_expected_una;
tc 269 net/rds/tcp.c tsinfo.last_seen_una = tc->t_last_seen_una;
tc 270 net/rds/tcp.c tsinfo.tos = tc->t_cpath->cp_conn->c_tos;
tc 292 net/rds/tcp.c struct rds_tcp_connection *tc;
tc 300 net/rds/tcp.c list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
tc 301 net/rds/tcp.c struct sock *sk = tc->t_sock->sk;
tc 309 net/rds/tcp.c tsinfo6.hdr_rem = tc->t_tinc_hdr_rem;
tc 310 net/rds/tcp.c tsinfo6.data_rem = tc->t_tinc_data_rem;
tc 311 net/rds/tcp.c tsinfo6.last_sent_nxt = tc->t_last_sent_nxt;
tc 312 net/rds/tcp.c tsinfo6.last_expected_una = tc->t_last_expected_una;
tc 313 net/rds/tcp.c tsinfo6.last_seen_una = tc->t_last_seen_una;
tc 363 net/rds/tcp.c struct rds_tcp_connection *tc = arg;
tc 366 net/rds/tcp.c rdsdebug("freeing tc %p\n", tc);
tc 369 net/rds/tcp.c if (!tc->t_tcp_node_detached)
tc 370 net/rds/tcp.c list_del(&tc->t_tcp_node);
tc 373 net/rds/tcp.c kmem_cache_free(rds_tcp_conn_slab, tc);
tc 378 net/rds/tcp.c struct rds_tcp_connection *tc;
tc 383 net/rds/tcp.c tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
tc 384 net/rds/tcp.c if (!tc) {
tc 388 net/rds/tcp.c mutex_init(&tc->t_conn_path_lock);
tc 389 net/rds/tcp.c tc->t_sock = NULL;
tc 390 net/rds/tcp.c tc->t_tinc = NULL;
tc 391 net/rds/tcp.c tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc 392 net/rds/tcp.c tc->t_tinc_data_rem = 0;
tc 394 net/rds/tcp.c conn->c_path[i].cp_transport_data = tc;
tc 395 net/rds/tcp.c tc->t_cpath = &conn->c_path[i];
tc 396 net/rds/tcp.c tc->t_tcp_node_detached = true;
tc 403 net/rds/tcp.c tc = conn->c_path[i].cp_transport_data;
tc 404 net/rds/tcp.c tc->t_tcp_node_detached = false;
tc 405 net/rds/tcp.c list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
tc 418 net/rds/tcp.c struct rds_tcp_connection *tc, *_tc;
tc 420 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
tc 421 net/rds/tcp.c if (tc->t_cpath->cp_conn == conn)
tc 439 net/rds/tcp.c struct rds_tcp_connection *tc, *_tc;
tc 444 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
tc 445 net/rds/tcp.c if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
tc 446 net/rds/tcp.c list_move_tail(&tc->t_tcp_node, &tmp_list);
tc 450 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
tc 451 net/rds/tcp.c rds_conn_destroy(tc->t_cpath->cp_conn);
tc 600 net/rds/tcp.c struct rds_tcp_connection *tc, *_tc;
tc 608 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
tc 609 net/rds/tcp.c struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
tc 613 net/rds/tcp.c if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
tc 614 net/rds/tcp.c list_move_tail(&tc->t_tcp_node, &tmp_list);
tc 616 net/rds/tcp.c list_del(&tc->t_tcp_node);
tc 617 net/rds/tcp.c tc->t_tcp_node_detached = true;
tc 621 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
tc 622 net/rds/tcp.c rds_conn_destroy(tc->t_cpath->cp_conn);
tc 663 net/rds/tcp.c struct rds_tcp_connection *tc, *_tc;
tc 666 net/rds/tcp.c list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
tc 667 net/rds/tcp.c struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
tc 669 net/rds/tcp.c if (net != c_net || !tc->t_sock)
tc 673 net/rds/tcp.c rds_conn_path_drop(tc->t_cpath, false);
tc 57 net/rds/tcp.h struct rds_tcp_connection *tc);
tc 58 net/rds/tcp.h u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
tc 59 net/rds/tcp.h u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
tc 60 net/rds/tcp.h u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
tc 44 net/rds/tcp_connect.c struct rds_tcp_connection *tc;
tc 52 net/rds/tcp_connect.c tc = cp->cp_transport_data;
tc 53 net/rds/tcp_connect.c state_change = tc->t_orig_state_change;
tc 55 net/rds/tcp_connect.c rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
tc 99 net/rds/tcp_connect.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 107 net/rds/tcp_connect.c mutex_lock(&tc->t_conn_path_lock);
tc 110 net/rds/tcp_connect.c mutex_unlock(&tc->t_conn_path_lock);
tc 185 net/rds/tcp_connect.c mutex_unlock(&tc->t_conn_path_lock);
tc 202 net/rds/tcp_connect.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 203 net/rds/tcp_connect.c struct socket *sock = tc->t_sock;
tc 206 net/rds/tcp_connect.c cp->cp_conn, tc, sock);
tc 213 net/rds/tcp_connect.c rds_tcp_restore_callbacks(sock, tc); /* tc->tc_sock = NULL */
tc 219 net/rds/tcp_connect.c if (tc->t_tinc) {
tc 220 net/rds/tcp_connect.c rds_inc_put(&tc->t_tinc->ti_inc);
tc 221 net/rds/tcp_connect.c tc->t_tinc = NULL;
tc 223 net/rds/tcp_connect.c tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc 224 net/rds/tcp_connect.c tc->t_tinc_data_rem = 0;
tc 159 net/rds/tcp_recv.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 160 net/rds/tcp_recv.c struct rds_tcp_incoming *tinc = tc->t_tinc;
tc 164 net/rds/tcp_recv.c rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
tc 179 net/rds/tcp_recv.c tc->t_tinc = tinc;
tc 193 net/rds/tcp_recv.c if (left && tc->t_tinc_hdr_rem) {
tc 194 net/rds/tcp_recv.c to_copy = min(tc->t_tinc_hdr_rem, left);
tc 200 net/rds/tcp_recv.c tc->t_tinc_hdr_rem,
tc 202 net/rds/tcp_recv.c tc->t_tinc_hdr_rem -= to_copy;
tc 206 net/rds/tcp_recv.c if (tc->t_tinc_hdr_rem == 0) {
tc 208 net/rds/tcp_recv.c tc->t_tinc_data_rem =
tc 215 net/rds/tcp_recv.c if (left && tc->t_tinc_data_rem) {
tc 216 net/rds/tcp_recv.c to_copy = min(tc->t_tinc_data_rem, left);
tc 231 net/rds/tcp_recv.c tc->t_tinc_data_rem -= to_copy;
tc 236 net/rds/tcp_recv.c if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
tc 247 net/rds/tcp_recv.c tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc 248 net/rds/tcp_recv.c tc->t_tinc_data_rem = 0;
tc 249 net/rds/tcp_recv.c tc->t_tinc = NULL;
tc 257 net/rds/tcp_recv.c skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
tc 264 net/rds/tcp_recv.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 265 net/rds/tcp_recv.c struct socket *sock = tc->t_sock;
tc 277 net/rds/tcp_recv.c rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
tc 292 net/rds/tcp_recv.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 293 net/rds/tcp_recv.c struct socket *sock = tc->t_sock;
tc 297 net/rds/tcp_recv.c cp->cp_index, tc, sock);
tc 310 net/rds/tcp_recv.c struct rds_tcp_connection *tc;
tc 321 net/rds/tcp_recv.c tc = cp->cp_transport_data;
tc 322 net/rds/tcp_recv.c ready = tc->t_orig_data_ready;
tc 48 net/rds/tcp_send.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 50 net/rds/tcp_send.c rds_tcp_cork(tc->t_sock, 1);
tc 55 net/rds/tcp_send.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 57 net/rds/tcp_send.c rds_tcp_cork(tc->t_sock, 0);
tc 79 net/rds/tcp_send.c struct rds_tcp_connection *tc = cp->cp_transport_data;
tc 89 net/rds/tcp_send.c tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
tc 90 net/rds/tcp_send.c rm->m_ack_seq = tc->t_last_sent_nxt +
tc 95 net/rds/tcp_send.c tc->t_last_expected_una = rm->m_ack_seq + 1;
tc 101 net/rds/tcp_send.c rm, rds_tcp_write_seq(tc),
tc 107 net/rds/tcp_send.c set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);
tc 109 net/rds/tcp_send.c ret = rds_tcp_sendmsg(tc->t_sock,
tc 123 net/rds/tcp_send.c ret = tc->t_sock->ops->sendpage(tc->t_sock,
tc 187 net/rds/tcp_send.c struct rds_tcp_connection *tc;
tc 196 net/rds/tcp_send.c tc = cp->cp_transport_data;
tc 197 net/rds/tcp_send.c rdsdebug("write_space for tc %p\n", tc);
tc 198 net/rds/tcp_send.c write_space = tc->t_orig_write_space;
tc 201 net/rds/tcp_send.c rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
tc 202 net/rds/tcp_send.c tc->t_last_seen_una = rds_tcp_snd_una(tc);
tc 203 net/rds/tcp_send.c rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
tc 3547 net/sched/cls_api.c entry->mpls_push.tc = tcf_mpls_tc(act);
tc 3558 net/sched/cls_api.c entry->mpls_mangle.tc = tcf_mpls_tc(act);
tc 766 net/sched/cls_flower.c u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
tc 768 net/sched/cls_flower.c if (tc & ~MPLS_TC_MASK)
tc 770 net/sched/cls_flower.c key_val->mpls_tc = tc;
tc 342 net/sched/cls_u32.c struct tc_u_common *tc;
tc 343 net/sched/cls_u32.c hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
tc 344 net/sched/cls_u32.c if (tc->ptr == key)
tc 345 net/sched/cls_u32.c return tc;
tc 390 net/sched/sch_mqprio.c unsigned int ntx, tc;
tc 432 net/sched/sch_mqprio.c for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
tc 433 net/sched/sch_mqprio.c opt.count[tc] = dev->tc_to_txq[tc].count;
tc 434 net/sched/sch_mqprio.c opt.offset[tc] = dev->tc_to_txq[tc].offset;
tc 494 net/sched/sch_mqprio.c int tc = netdev_txq_to_tc(dev, cl - 1);
tc 496 net/sched/sch_mqprio.c tcm->tcm_parent = (tc < 0) ? 0 :
tc 498 net/sched/sch_mqprio.c TC_H_MIN(tc + TC_H_MIN_PRIORITY));
tc 519 net/sched/sch_mqprio.c struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
tc 529 net/sched/sch_mqprio.c for (i = tc.offset; i < tc.offset + tc.count; i++) {
tc 200 net/sched/sch_taprio.c int tc, n;
tc 202 net/sched/sch_taprio.c tc = netdev_get_prio_tc_map(dev, skb->priority);
tc 224 net/sched/sch_taprio.c if (!(entry->gate_mask & BIT(tc)) ||
tc 461 net/sched/sch_taprio.c u8 tc;
tc 474 net/sched/sch_taprio.c tc = netdev_get_prio_tc_map(dev, prio);
tc 476 net/sched/sch_taprio.c if (!(gate_mask & BIT(tc)))
tc 548 net/sched/sch_taprio.c u8 tc;
tc 565 net/sched/sch_taprio.c tc = netdev_get_prio_tc_map(dev, prio);
tc 567 net/sched/sch_taprio.c if (!(gate_mask & BIT(tc))) {
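The mqprio/taprio entries reuse one mapping everywhere: an skb's priority selects a traffic class via prio_tc_map, the class selects a contiguous [offset, offset + count) range of TX queues via tc_to_txq, and taprio additionally gates each class with gate_mask & BIT(tc). A userspace model of the queue-range selection (hash stands in for the kernel's flow hash; the 0x0f mask mirrors TC_BITMASK):

#include <stdint.h>
#include <stdio.h>

struct tc_txq { uint16_t count, offset; };

/* Model of prio -> tc -> txq selection as in netdev_pick_tx():
 * queue = tc_to_txq[tc].offset + hash % tc_to_txq[tc].count. */
static unsigned int pick_txq(const uint8_t *prio_tc_map,
			     const struct tc_txq *tc_to_txq,
			     uint8_t prio, uint32_t hash)
{
	uint8_t tc = prio_tc_map[prio & 0x0f];

	return tc_to_txq[tc].offset + hash % tc_to_txq[tc].count;
}

int main(void)
{
	uint8_t map[16] = { 0 };
	struct tc_txq txq[2] = { { 2, 0 }, { 2, 2 } };

	map[3] = 1; /* priority 3 -> traffic class 1 */
	printf("prio 0 -> txq %u\n", pick_txq(map, txq, 0, 7)); /* 1 */
	printf("prio 3 -> txq %u\n", pick_txq(map, txq, 3, 7)); /* 3 */
	return 0;
}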
tc 523 sound/hda/hdac_stream.c struct timecounter *tc = &azx_dev->tc;
tc 544 sound/hda/hdac_stream.c timecounter_init(tc, cc, nsec);
tc 550 sound/hda/hdac_stream.c tc->cycle_last = last;
tc 579 sound/hda/hdac_stream.c cycle_last = s->tc.cycle_last;
tc 509 sound/pci/hda/hda_controller.c nsec = timecounter_read(&azx_dev->core.tc);
tc 4109 sound/pci/rme9652/hdspm.c unsigned int tc[4] = { 0, 0, 0, 0};
tc 4113 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_input_MSB;
tc 4116 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_input_LSB;
tc 4124 sound/pci/rme9652/hdspm.c tc[1] |= HDSPM_TCO1_LTC_Format_LSB;
tc 4127 sound/pci/rme9652/hdspm.c tc[1] |= HDSPM_TCO1_LTC_Format_MSB;
tc 4130 sound/pci/rme9652/hdspm.c tc[1] |= HDSPM_TCO1_LTC_Format_MSB +
tc 4134 sound/pci/rme9652/hdspm.c tc[1] |= HDSPM_TCO1_LTC_Format_LSB +
tc 4138 sound/pci/rme9652/hdspm.c tc[1] |= HDSPM_TCO1_LTC_Format_LSB +
tc 4148 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_WCK_IO_ratio_LSB;
tc 4151 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_WCK_IO_ratio_MSB;
tc 4159 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_freq;
tc 4162 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_freq_from_app;
tc 4170 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_pull_up;
tc 4173 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_pull_down;
tc 4176 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_pull_up + HDSPM_TCO2_set_01_4;
tc 4179 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_pull_down + HDSPM_TCO2_set_01_4;
tc 4186 sound/pci/rme9652/hdspm.c tc[2] |= HDSPM_TCO2_set_term_75R;
tc 4189 sound/pci/rme9652/hdspm.c hdspm_write(hdspm, HDSPM_WR_TCO, tc[0]);
tc 4190 sound/pci/rme9652/hdspm.c hdspm_write(hdspm, HDSPM_WR_TCO+4, tc[1]);
tc 4191 sound/pci/rme9652/hdspm.c hdspm_write(hdspm, HDSPM_WR_TCO+8, tc[2]);
tc 4192 sound/pci/rme9652/hdspm.c hdspm_write(hdspm, HDSPM_WR_TCO+12, tc[3]);
tc 1262 sound/soc/intel/skylake/skl-pcm.c nsec = timecounter_read(&hstr->tc);
tc 715 sound/soc/soc-topology.c struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
tc 718 sound/soc/soc-topology.c u32 access = le32_to_cpu(tc->access);
tc 724 sound/soc/soc-topology.c tplg_tlv = &tc->tlv;
tc 64 tools/perf/arch/x86/tests/perf-time-to-tsc.c struct perf_tsc_conversion tc;
tc 97 tools/perf/arch/x86/tests/perf-time-to-tsc.c ret = perf_read_tsc_conversion(pc, &tc);
tc 150 tools/perf/arch/x86/tests/perf-time-to-tsc.c test_time = tsc_to_perf_time(test_tsc, &tc);
tc 151 tools/perf/arch/x86/tests/perf-time-to-tsc.c comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
tc 152 tools/perf/arch/x86/tests/perf-time-to-tsc.c comm2_tsc = perf_time_to_tsc(comm2_time, &tc);
tc 71 tools/perf/arch/x86/util/intel-bts.c struct perf_tsc_conversion tc = { .time_mult = 0, };
tc 83 tools/perf/arch/x86/util/intel-bts.c err = perf_read_tsc_conversion(pc, &tc);
tc 88 tools/perf/arch/x86/util/intel-bts.c cap_user_time_zero = tc.time_mult != 0;
tc 96 tools/perf/arch/x86/util/intel-bts.c auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
tc 97 tools/perf/arch/x86/util/intel-bts.c auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
tc 98 tools/perf/arch/x86/util/intel-bts.c auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
tc 325 tools/perf/arch/x86/util/intel-pt.c struct perf_tsc_conversion tc = { .time_mult = 0, };
tc 360 tools/perf/arch/x86/util/intel-pt.c err = perf_read_tsc_conversion(pc, &tc);
tc 365 tools/perf/arch/x86/util/intel-pt.c cap_user_time_zero = tc.time_mult != 0;
tc 375 tools/perf/arch/x86/util/intel-pt.c auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
tc 376 tools/perf/arch/x86/util/intel-pt.c auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
tc 377 tools/perf/arch/x86/util/intel-pt.c auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
tc 16 tools/perf/arch/x86/util/tsc.c struct perf_tsc_conversion *tc)
tc 25 tools/perf/arch/x86/util/tsc.c tc->time_mult = pc->time_mult;
tc 26 tools/perf/arch/x86/util/tsc.c tc->time_shift = pc->time_shift;
tc 27 tools/perf/arch/x86/util/tsc.c tc->time_zero = pc->time_zero;
tc 66 tools/perf/arch/x86/util/tsc.c struct perf_tsc_conversion tc;
tc 71 tools/perf/arch/x86/util/tsc.c err = perf_read_tsc_conversion(pc, &tc);
tc 79 tools/perf/arch/x86/util/tsc.c event.time_conv.time_mult = tc.time_mult;
tc 80 tools/perf/arch/x86/util/tsc.c event.time_conv.time_shift = tc.time_shift;
tc 81 tools/perf/arch/x86/util/tsc.c event.time_conv.time_zero = tc.time_zero;
tc 55 tools/perf/util/intel-bts.c struct perf_tsc_conversion tc;
tc 610 tools/perf/util/intel-bts.c timestamp = perf_time_to_tsc(sample->time, &bts->tc);
tc 872 tools/perf/util/intel-bts.c bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
tc 873 tools/perf/util/intel-bts.c bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
tc 874 tools/perf/util/intel-bts.c bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
tc 77 tools/perf/util/intel-pt.c struct perf_tsc_conversion tc;
tc 843 tools/perf/util/intel-pt.c quot = ns / pt->tc.time_mult;
tc 844 tools/perf/util/intel-pt.c rem = ns % pt->tc.time_mult;
tc 845 tools/perf/util/intel-pt.c return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
tc 846 tools/perf/util/intel-pt.c pt->tc.time_mult;
tc 1224 tools/perf/util/intel-pt.c sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
tc 1747 tools/perf/util/intel-pt.c sample.time = tsc_to_perf_time(timestamp, &pt->tc);
tc 1850 tools/perf/util/intel-pt.c tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
tc 2381 tools/perf/util/intel-pt.c &pt->tc);
tc 2418 tools/perf/util/intel-pt.c &pt->tc));
tc 2497 tools/perf/util/intel-pt.c &pt->tc));
tc 2516 tools/perf/util/intel-pt.c perf_time_to_tsc(sample->time, &pt->tc));
tc 2542 tools/perf/util/intel-pt.c timestamp = perf_time_to_tsc(sample->time, &pt->tc);
tc 2955 tools/perf/util/intel-pt.c tsc = perf_time_to_tsc(ns, &pt->tc);
tc 2958 tools/perf/util/intel-pt.c tm = tsc_to_perf_time(tsc, &pt->tc);
tc 2965 tools/perf/util/intel-pt.c tm = tsc_to_perf_time(++tsc, &pt->tc);
tc 2975 tools/perf/util/intel-pt.c tsc = perf_time_to_tsc(ns, &pt->tc);
tc 2978 tools/perf/util/intel-pt.c tm = tsc_to_perf_time(tsc, &pt->tc);
tc 2985 tools/perf/util/intel-pt.c tm = tsc_to_perf_time(--tsc, &pt->tc);
tc 3107 tools/perf/util/intel-pt.c pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
tc 3108 tools/perf/util/intel-pt.c pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
tc 3109 tools/perf/util/intel-pt.c pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
tc 3178 tools/perf/util/intel-pt.c if (pt->timeless_decoding && !pt->tc.time_mult)
tc 3179 tools/perf/util/intel-pt.c pt->tc.time_mult = 1;
tc 3248 tools/perf/util/intel-pt.c if (pt->tc.time_mult) {
tc 371 tools/perf/util/jitdump.c struct perf_tsc_conversion tc;
tc 376 tools/perf/util/jitdump.c tc.time_shift = jd->session->time_conv.time_shift;
tc 377 tools/perf/util/jitdump.c tc.time_mult = jd->session->time_conv.time_mult;
tc 378 tools/perf/util/jitdump.c tc.time_zero = jd->session->time_conv.time_zero;
tc 380 tools/perf/util/jitdump.c if (!tc.time_mult)
tc 383 tools/perf/util/jitdump.c return tsc_to_perf_time(timestamp, &tc);
tc 32 tools/perf/util/term.c struct termios tc;
tc 35 tools/perf/util/term.c tc = *old;
tc 36 tools/perf/util/term.c tc.c_lflag &= ~(ICANON | ECHO);
tc 37 tools/perf/util/term.c tc.c_cc[VMIN] = 0;
tc 38 tools/perf/util/term.c tc.c_cc[VTIME] = 0;
tc 39 tools/perf/util/term.c tcsetattr(0, TCSANOW, &tc);
tc 7 tools/perf/util/tsc.c u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
tc 11 tools/perf/util/tsc.c t = ns - tc->time_zero;
tc 12 tools/perf/util/tsc.c quot = t / tc->time_mult;
tc 13 tools/perf/util/tsc.c rem = t % tc->time_mult;
tc 14 tools/perf/util/tsc.c return (quot << tc->time_shift) +
tc 15 tools/perf/util/tsc.c (rem << tc->time_shift) / tc->time_mult;
tc 18 tools/perf/util/tsc.c u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
tc 22 tools/perf/util/tsc.c quot = cyc >> tc->time_shift;
tc 23 tools/perf/util/tsc.c rem = cyc & (((u64)1 << tc->time_shift) - 1);
tc 24 tools/perf/util/tsc.c return tc->time_zero + quot * tc->time_mult +
tc 25 tools/perf/util/tsc.c ((rem * tc->time_mult) >> tc->time_shift);
tc 16 tools/perf/util/tsc.h struct perf_tsc_conversion *tc);
tc 18 tools/perf/util/tsc.h u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
tc 19 tools/perf/util/tsc.h u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
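tools/perf/util/tsc.c converts in both directions with fixed-point arithmetic: time = time_zero + (cyc >> shift) * mult + ((cyc & ((1 << shift) - 1)) * mult >> shift), and the inverse splits (ns - time_zero) into quotient and remainder by mult so no intermediate product overflows. A runnable round-trip using the same formulas (the mult/shift/zero values below are made up for illustration; the round trip agrees to within fixed-point rounding):

#include <stdint.h>
#include <stdio.h>

struct tsc_conv { uint16_t shift; uint32_t mult; uint64_t zero; };

static uint64_t time_to_tsc(uint64_t ns, const struct tsc_conv *tc)
{
	uint64_t t = ns - tc->zero;
	uint64_t quot = t / tc->mult;
	uint64_t rem = t % tc->mult;

	return (quot << tc->shift) + (rem << tc->shift) / tc->mult;
}

static uint64_t tsc_to_time(uint64_t cyc, const struct tsc_conv *tc)
{
	uint64_t quot = cyc >> tc->shift;
	uint64_t rem = cyc & (((uint64_t)1 << tc->shift) - 1);

	return tc->zero + quot * tc->mult + ((rem * tc->mult) >> tc->shift);
}

int main(void)
{
	struct tsc_conv tc = { .shift = 10, .mult = 341, .zero = 1000000 };
	uint64_t ns = 123456789;
	uint64_t cyc = time_to_tsc(ns, &tc);

	printf("ns=%llu -> tsc=%llu -> ns=%llu\n",
	       (unsigned long long)ns, (unsigned long long)cyc,
	       (unsigned long long)tsc_to_time(cyc, &tc));
	return 0;
}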
tc 41 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c struct thread_context *tc = (struct thread_context *)arg;
tc 43 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c int kvmcpu = tc->kvmcpu;
tc 44 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c struct kvm_run *run = tc->run;
tc 58 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c struct thread_context tc;
tc 68 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c tc.kvmcpu = kvmcpu;
tc 69 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c tc.run = run;
tc 72 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);