/linux-4.4.14/drivers/net/ethernet/amd/ |
D | ariadne.c |
    92 int cur_tx, cur_rx; /* The next free ring entry */ member
    125 priv->cur_rx = priv->cur_tx = 0; in ariadne_init_ring()
    308 while (dirty_tx < priv->cur_tx) { in ariadne_interrupt()
    345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { in ariadne_interrupt()
    347 dirty_tx, priv->cur_tx, in ariadne_interrupt()
    354 dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { in ariadne_interrupt()
    576 entry = priv->cur_tx % TX_RING_SIZE; in ariadne_start_xmit()
    596 priv->cur_tx++; in ariadne_start_xmit()
    597 if ((priv->cur_tx >= TX_RING_SIZE) && in ariadne_start_xmit()
    601 priv->cur_tx, priv->dirty_tx); in ariadne_start_xmit()
    [all …]
|
D | atarilance.c |
    222 int cur_rx, cur_tx; /* The next free ring entry */ member
    689 lp->cur_rx = lp->cur_tx = 0; in lance_init_ring()
    748 lp->dirty_tx, lp->cur_tx, in lance_tx_timeout()
    814 entry = lp->cur_tx & TX_RING_MOD_MASK; in lance_start_xmit()
    828 lp->cur_tx++; in lance_start_xmit()
    829 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { in lance_start_xmit()
    830 lp->cur_tx -= TX_RING_SIZE; in lance_start_xmit()
    884 while( dirty_tx < lp->cur_tx) { in lance_interrupt()
    920 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
    923 dirty_tx, lp->cur_tx, lp->tx_full )); in lance_interrupt()
    [all …]
|
D | lance.c |
    248 int cur_rx, cur_tx; /* The next free ring entry */ member
    867 lp->cur_rx = lp->cur_tx = 0; in lance_init_ring()
    931 lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "", in lance_tx_timeout()
    971 entry = lp->cur_tx & TX_RING_MOD_MASK; in lance_start_xmit()
    1006 lp->cur_tx++; in lance_start_xmit()
    1012 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
    1051 while (dirty_tx < lp->cur_tx) { in lance_interrupt()
    1095 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
    1097 dirty_tx, lp->cur_tx, in lance_interrupt()
    1105 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) in lance_interrupt()
|
D | pcnet32.c |
    275 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member
    1252 while (dirty_tx != lp->cur_tx) { in pcnet32_tx()
    1312 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); in pcnet32_tx()
    1315 dirty_tx, lp->cur_tx, lp->tx_full); in pcnet32_tx()
    2307 lp->cur_rx = lp->cur_tx = 0; in pcnet32_init_ring()
    2411 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", in pcnet32_tx_timeout()
    2458 entry = lp->cur_tx & lp->tx_mod_mask; in pcnet32_start_xmit()
    2479 lp->cur_tx++; in pcnet32_start_xmit()
|
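The lance-family drivers above (ariadne, atarilance, lance, pcnet32) all keep cur_tx and dirty_tx as free-running counters and derive the ring slot by masking or taking the remainder against the ring size. A minimal user-space sketch of that arithmetic follows; the ring size, struct, and function names are illustrative and not taken from any of the drivers listed.

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 16U	/* illustrative; each driver picks its own */

struct tx_ring {
	unsigned int cur_tx;	/* next free entry, counts up forever */
	unsigned int dirty_tx;	/* oldest entry not yet reclaimed */
};

/* Queue one packet; returns false when every slot is in flight,
 * which is the point where a driver would call netif_stop_queue(). */
static bool queue_packet(struct tx_ring *r)
{
	if (r->cur_tx - r->dirty_tx >= TX_RING_SIZE)
		return false;

	unsigned int entry = r->cur_tx % TX_RING_SIZE;	/* slot to fill */

	printf("fill descriptor %u\n", entry);
	r->cur_tx++;		/* the counter itself is never wrapped */
	return true;
}

int main(void)
{
	struct tx_ring r = { 0, 0 };

	for (int i = 0; i < 20; i++)
		if (!queue_packet(&r))
			printf("ring full after %d packets\n", i);
	return 0;
}

Because only the difference cur_tx - dirty_tx matters for the full/empty test, the free-running counters survive wraparound; drivers with power-of-two rings can mask instead of dividing, which is what TX_RING_MOD_MASK in lance.c and tx_mod_mask in pcnet32.c do above.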
/linux-4.4.14/drivers/net/ethernet/packetengines/ |
D | yellowfin.c |
    326 unsigned int cur_tx, dirty_tx; member
    689 yp->cur_tx, yp->dirty_tx, in yellowfin_tx_timeout()
    714 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) in yellowfin_tx_timeout()
    728 yp->cur_rx = yp->cur_tx = 0; in yellowfin_init_ring()
    820 entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
    850 yp->cur_tx++; in yellowfin_start_xmit()
    858 yp->cur_tx++; in yellowfin_start_xmit()
    860 unsigned next_entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
    875 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) in yellowfin_start_xmit()
    882 yp->cur_tx, entry); in yellowfin_start_xmit()
    [all …]
|
D | hamachi.c |
    495 unsigned int cur_tx, dirty_tx; member
    998 for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) { in hamachi_tx()
    1112 hmp->cur_rx = hmp->cur_tx = 0; in hamachi_tx_timeout()
    1166 hmp->cur_rx = hmp->cur_tx = 0; in hamachi_init_ring()
    1221 printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx); in hamachi_start_xmit()
    1235 entry = hmp->cur_tx % TX_RING_SIZE; in hamachi_start_xmit()
    1257 hmp->cur_tx++; in hamachi_start_xmit()
    1276 if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4)) in hamachi_start_xmit()
    1285 dev->name, hmp->cur_tx, entry); in hamachi_start_xmit()
    1330 for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){ in hamachi_interrupt()
    [all …]
|
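The completion side of the same scheme shows up in hamachi_tx() and hamachi_interrupt() (lines 998 and 1330 above): dirty_tx chases cur_tx, releasing one descriptor per step until it hits one still owned by the hardware. A sketch, again with invented names and a stubbed status array rather than real DMA descriptors:

#include <stdio.h>

#define TX_RING_SIZE 16U			/* illustrative */

static unsigned int tx_status[TX_RING_SIZE];	/* 0 = done, nonzero = still owned by NIC */
static unsigned int cur_tx;			/* next free entry */
static unsigned int dirty_tx;			/* oldest unreclaimed entry */

/* Advance dirty_tx toward cur_tx, releasing completed descriptors. */
static void reclaim_tx(void)
{
	for (; cur_tx - dirty_tx > 0; dirty_tx++) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;

		if (tx_status[entry])
			break;	/* hardware still owns it; stop here */
		printf("reclaim entry %u\n", entry);
	}
	/* A real driver would wake the queue here once enough slots are
	 * free, e.g. once cur_tx - dirty_tx drops below TX_RING_SIZE - 4. */
}

int main(void)
{
	cur_tx = 3;		/* pretend three packets were queued */
	tx_status[2] = 1;	/* the third one has not completed yet */
	reclaim_tx();		/* frees entries 0 and 1, leaves 2 pending */
	return 0;
}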
/linux-4.4.14/drivers/net/arcnet/ |
D | arcnet.c |
    473 lp->cur_tx = lp->next_tx = -1; in arcnet_open()
    609 lp->hw.status(dev), lp->cur_tx, lp->next_tx, skb->len, skb->protocol); in arcnet_send_packet()
    690 lp->hw.status(dev), lp->intmask, lp->next_tx, lp->cur_tx); in go_tx()
    692 if (lp->cur_tx != -1 || lp->next_tx == -1) in go_tx()
    698 lp->cur_tx = lp->next_tx; in go_tx()
    702 lp->hw.command(dev, TXcmd | (lp->cur_tx << 3)); in go_tx()
    728 lp->hw.command(dev, NOTXcmd | (lp->cur_tx << 3)); in arcnet_timeout()
    745 if (lp->cur_tx == -1) in arcnet_timeout()
    850 if (lp->cur_tx != -1 && !lp->timed_out) { in arcnet_interrupt()
    882 if (lp->cur_tx != -1) in arcnet_interrupt()
    [all …]
|
D | arc-rawmode.c | 121 lp->next_tx, lp->cur_tx, bufnum); in prepare_tx()
|
D | rfc1051.c | 215 lp->next_tx, lp->cur_tx, bufnum); in prepare_tx()
|
D | arcdevice.h | 252 int cur_tx, /* buffer used by current transmit, or -1 */ member
|
D | capmode.c | 145 lp->next_tx, lp->cur_tx, bufnum); in prepare_tx()
|
D | rfc1201.c | 483 lp->next_tx, lp->cur_tx, bufnum); in prepare_tx()
|
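The arcnet entries use cur_tx very differently from the Ethernet ring drivers: per arcdevice.h it is the number of the hardware buffer currently being transmitted, with -1 meaning the transmitter is idle, while next_tx holds a buffer that has been loaded but not yet started (see the go_tx() lines above). A small sketch of that hand-off; the struct and the printed "command" stand in for the real lp->hw.command() call:

#include <stdio.h>

struct arc_state {
	int cur_tx;	/* buffer the chip is sending, or -1 when idle */
	int next_tx;	/* buffer loaded and waiting, or -1 when none */
};

/* Start the pending buffer if the transmitter is idle,
 * mirroring the test at arcnet.c line 692 above. */
static int go_tx(struct arc_state *lp)
{
	if (lp->cur_tx != -1 || lp->next_tx == -1)
		return 0;			/* busy, or nothing to send */

	lp->cur_tx = lp->next_tx;
	lp->next_tx = -1;
	printf("TX command for buffer %d\n", lp->cur_tx);	/* stands in for lp->hw.command() */
	return 1;
}

int main(void)
{
	struct arc_state lp = { .cur_tx = -1, .next_tx = 2 };

	go_tx(&lp);	/* starts buffer 2 */
	go_tx(&lp);	/* no-op: cur_tx != -1, transmitter busy */
	lp.cur_tx = -1;	/* pretend the TX-complete interrupt fired */
	lp.next_tx = 3;
	go_tx(&lp);	/* now buffer 3 goes out */
	return 0;
}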
/linux-4.4.14/drivers/net/ethernet/ |
D | fealnx.c |
    403 struct fealnx_desc *cur_tx; member
    1157 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), in enable_rxtx()
    1280 np->cur_tx = &np->tx_ring[0]; in init_ring()
    1385 np->cur_tx = &np->tx_ring[0]; in reset_tx_descriptors()
    1494 long tx_status = np->cur_tx->status; in intr_handler()
    1495 long tx_control = np->cur_tx->control; in intr_handler()
    1500 next = np->cur_tx->next_desc_logical; in intr_handler()
    1537 pci_unmap_single(np->pci_dev, np->cur_tx->buffer, in intr_handler()
    1538 np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); in intr_handler()
    1539 dev_kfree_skb_irq(np->cur_tx->skbuff); in intr_handler()
    [all …]
|
D | ethoc.c |
    208 unsigned int cur_tx; member
    309 dev->cur_tx = 0; in ethoc_init_ring()
    526 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) { in ethoc_tx()
    537 (priv->dty_tx == priv->cur_tx)) in ethoc_tx()
    545 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) in ethoc_tx()
    868 entry = priv->cur_tx % priv->num_tx; in ethoc_start_xmit()
    870 priv->cur_tx++; in ethoc_start_xmit()
    888 if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { in ethoc_start_xmit()
|
/linux-4.4.14/drivers/net/ethernet/smsc/ |
D | epic100.c |
    268 unsigned int cur_tx, dirty_tx; member
    790 ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); in epic_restart()
    881 ep->dirty_tx, ep->cur_tx); in epic_tx_timeout()
    905 ep->dirty_tx = ep->cur_tx = 0; in epic_init_ring()
    960 free_count = ep->cur_tx - ep->dirty_tx; in epic_start_xmit()
    961 entry = ep->cur_tx % TX_RING_SIZE; in epic_start_xmit()
    982 ep->cur_tx++; in epic_start_xmit()
    1021 unsigned int dirty_tx, cur_tx; in epic_tx() local
    1027 cur_tx = ep->cur_tx; in epic_tx()
    1028 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { in epic_tx()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/dec/tulip/ |
D | tulip_core.c |
    334 tp->cur_rx = tp->cur_tx = 0; in tulip_up()
    367 tp->tx_buffers[tp->cur_tx].skb = NULL; in tulip_up()
    368 tp->tx_buffers[tp->cur_tx].mapping = mapping; in tulip_up()
    371 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192); in tulip_up()
    372 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); in tulip_up()
    373 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned); in tulip_up()
    375 tp->cur_tx++; in tulip_up()
    677 entry = tp->cur_tx % TX_RING_SIZE; in tulip_start_xmit()
    685 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ in tulip_start_xmit()
    687 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { in tulip_start_xmit()
    [all …]
|
D | winbond-840.c |
    314 unsigned int cur_tx, dirty_tx; member
    834 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0; in init_rxtx_rings()
    948 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); in tx_timeout()
    1008 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
    1037 np->cur_tx++; in start_tx()
    1046 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN || in start_tx()
    1056 np->cur_tx, entry); in start_tx()
    1064 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in netdev_tx_done()
    1102 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART && in netdev_tx_done()
    1143 np->cur_tx != np->dirty_tx) { in intr_handler()
    [all …]
|
D | interrupt.c |
    588 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; in tulip_interrupt()
    644 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { in tulip_interrupt()
    647 dirty_tx, tp->cur_tx); in tulip_interrupt()
    652 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) in tulip_interrupt()
|
D | tulip.h | 422 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member
|
/linux-4.4.14/drivers/net/ethernet/dlink/ |
D | sundance.c |
    398 unsigned int cur_tx, dirty_tx; member
    1001 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
    1016 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in tx_timeout()
    1030 np->cur_rx = np->cur_tx = 0; in init_ring()
    1078 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; in tx_poll()
    1081 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { in tx_poll()
    1106 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
    1120 np->cur_tx++; in start_tx()
    1126 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && in start_tx()
    1135 dev->name, np->cur_tx, entry); in start_tx()
    [all …]
|
D | dl2k.c |
    597 np->cur_rx = np->cur_tx = 0; in alloc_list()
    656 entry = np->cur_tx % TX_RING_SIZE; in start_xmit()
    693 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; in start_xmit()
    694 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE in start_xmit()
    746 if (np->cur_tx != np->old_tx) in rio_interrupt()
    770 while (entry != np->cur_tx) { in rio_free_tx()
    798 ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE in rio_free_tx()
|
D | dl2k.h | 398 unsigned long cur_tx, old_tx; member
|
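dl2k.c is one of the few drivers here that wraps cur_tx at every increment instead of letting it run free, so the in-flight count has to be taken modulo the ring size, as the lines at 694 and 798 show. A sketch of that arithmetic with an illustrative ring size:

#include <stdio.h>

#define TX_RING_SIZE 8UL	/* illustrative; dl2k defines its own */

static unsigned long cur_tx, old_tx;	/* both always in [0, TX_RING_SIZE) */

/* Number of descriptors queued but not yet reclaimed. */
static unsigned long tx_pending(void)
{
	return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		cur_tx = (cur_tx + 1) % TX_RING_SIZE;	/* queue one packet */
		printf("cur_tx=%lu old_tx=%lu pending=%lu\n",
		       cur_tx, old_tx, tx_pending());
	}
	return 0;
}

With wrapped indices a completely full ring is indistinguishable from an empty one, which is presumably why this style of driver stops the queue before the pending count can reach the ring size.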
/linux-4.4.14/drivers/net/ethernet/stmicro/stmmac/ |
D | ring_mode.c | 35 unsigned int entry = priv->cur_tx % txsize; in stmmac_jumbo_frm() 65 entry = (++priv->cur_tx) % txsize; in stmmac_jumbo_frm()
|
D | chain_mode.c | 35 unsigned int entry = priv->cur_tx % txsize; in stmmac_jumbo_frm() 57 entry = (++priv->cur_tx) % txsize; in stmmac_jumbo_frm()
|
D | stmmac.h | 55 unsigned int cur_tx; member
|
D | stmmac_main.c |
    204 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; in stmmac_tx_avail()
    230 if ((priv->dirty_tx == priv->cur_tx) && in stmmac_enable_eee_mode()
    1110 priv->cur_tx = 0; in init_dma_desc_rings()
    1325 while (priv->dirty_tx != priv->cur_tx) { in stmmac_tx_clean()
    1357 priv->cur_tx, priv->dirty_tx); in stmmac_tx_clean()
    1442 priv->cur_tx = 0; in stmmac_tx_err()
    1972 entry = priv->cur_tx % txsize; in stmmac_xmit()
    2007 entry = (++priv->cur_tx) % txsize; in stmmac_xmit()
    2050 priv->cur_tx++; in stmmac_xmit()
    2054 __func__, (priv->cur_tx % txsize), in stmmac_xmit()
    [all …]
|
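stmmac_tx_avail() (line 204 above) turns the two free-running counters into a count of usable descriptors: ring size minus what is in flight, minus one slot that is held back. The same expression appears in sxgbe_tx_avail() further down. A tiny worked example, with numbers chosen only for illustration:

#include <stdio.h>

static unsigned int cur_tx, dirty_tx;
static const unsigned int dma_tx_size = 256;	/* illustrative ring size */

/* Same shape as stmmac_tx_avail(): free slots, keeping one in reserve. */
static unsigned int tx_avail(void)
{
	return dirty_tx + dma_tx_size - cur_tx - 1;
}

int main(void)
{
	cur_tx = 300;	/* 300 descriptors queued since the ring was set up */
	dirty_tx = 170;	/* 170 already reclaimed, so 130 are in flight */
	printf("available: %u\n", tx_avail());	/* 256 - 130 - 1 = 125 */
	return 0;
}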
/linux-4.4.14/drivers/net/ethernet/3com/ |
D | 3c515.c |
    310 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member
    842 vp->cur_tx = vp->dirty_tx = 0; in corkscrew_open()
    980 vp->cur_tx); in corkscrew_timeout()
    1013 int entry = vp->cur_tx % TX_RING_SIZE; in corkscrew_start_xmit()
    1020 if (vp->cur_tx != 0) in corkscrew_start_xmit()
    1021 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; in corkscrew_start_xmit()
    1026 dev->name, vp->cur_tx); in corkscrew_start_xmit()
    1050 vp->cur_tx++; in corkscrew_start_xmit()
    1051 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) in corkscrew_start_xmit()
    1175 while (lp->cur_tx - dirty_tx > 0) { in corkscrew_interrupt()
    [all …]
|
D | 3c59x.c |
    604 unsigned int cur_rx, cur_tx; /* The next free ring entry */ member
    1694 vp->cur_tx = vp->dirty_tx = 0; in vortex_up()
    1936 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) in vortex_tx_timeout()
    1939 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) { in vortex_tx_timeout()
    2132 int entry = vp->cur_tx % TX_RING_SIZE; in boomerang_start_xmit()
    2134 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; in boomerang_start_xmit()
    2141 dev->name, vp->cur_tx); in boomerang_start_xmit()
    2153 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { in boomerang_start_xmit()
    2238 vp->cur_tx++; in boomerang_start_xmit()
    2241 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { in boomerang_start_xmit()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/nuvoton/ |
D | w90p910_ether.c |
    166 unsigned int cur_tx; member
    480 ether->cur_tx = 0x0; in w90p910_reset_mac()
    607 txbd = &ether->tdesc->desclist[ether->cur_tx]; in w90p910_send_frame()
    608 buffer = ether->tdesc->tran_buf[ether->cur_tx]; in w90p910_send_frame()
    625 if (++ether->cur_tx >= TX_DESC_SIZE) in w90p910_send_frame()
    626 ether->cur_tx = 0; in w90p910_send_frame()
    628 txbd = &ether->tdesc->desclist[ether->cur_tx]; in w90p910_send_frame()
    955 ether->cur_tx = 0x0; in w90p910_ether_setup()
|
/linux-4.4.14/drivers/net/ethernet/adaptec/ |
D | starfire.c |
    560 unsigned int cur_tx, dirty_tx, reap_tx; member
    1144 np->cur_rx = np->cur_tx = np->reap_tx = 0; in init_ring()
    1193 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { in start_tx()
    1205 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
    1243 dev->name, np->cur_tx, np->dirty_tx, in start_tx()
    1247 np->cur_tx += np->tx_info[entry].used_slots; in start_tx()
    1251 np->cur_tx += np->tx_info[entry].used_slots; in start_tx()
    1255 if (np->cur_tx % (TX_RING_SIZE / 2) == 0) in start_tx()
    1268 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) in start_tx()
    1367 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { in intr_handler()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/samsung/sxgbe/ |
D | sxgbe_main.c |
    195 return queue->dirty_tx + tx_qsize - queue->cur_tx - 1; in sxgbe_tx_avail()
    427 tx_ring->cur_tx = 0; in init_tx_ring()
    749 while (tqueue->dirty_tx != tqueue->cur_tx) { in sxgbe_tx_queue_clean()
    762 __func__, tqueue->cur_tx, tqueue->dirty_tx); in sxgbe_tx_queue_clean()
    841 tx_ring->cur_tx = 0; in sxgbe_restart_tx_queue()
    1325 entry = tqueue->cur_tx % tx_rsize; in sxgbe_xmit()
    1350 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
    1372 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
    1412 tqueue->cur_tx++; in sxgbe_xmit()
    1416 __func__, tqueue->cur_tx % tx_rsize, in sxgbe_xmit()
|
D | sxgbe_common.h | 388 unsigned int cur_tx; member
|
/linux-4.4.14/drivers/net/ethernet/via/ |
D | via-rhine.c |
    475 unsigned int cur_tx, dirty_tx; member
    1326 rp->dirty_tx = rp->cur_tx = 0; in alloc_tbufs()
    1783 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; in rhine_tx_queue_full()
    1798 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
    1860 rp->cur_tx++; in rhine_start_tx()
    1889 rp->cur_tx - 1, entry); in rhine_start_tx()
    1936 unsigned int cur_tx; in rhine_tx() local
    1946 cur_tx = rp->cur_tx; in rhine_tx()
    1948 while (dirty_tx != cur_tx) { in rhine_tx()
|
/linux-4.4.14/drivers/net/ethernet/realtek/ |
D | 8139too.c |
    592 unsigned long cur_tx; member
    1460 tp->cur_tx = 0; in rtl8139_init_ring()
    1655 tp->cur_tx = 0; in rtl8139_tx_clear()
    1675 tp->cur_tx, tp->dirty_tx); in rtl8139_tx_timeout_task()
    1727 entry = tp->cur_tx % NUM_TX_DESC; in rtl8139_start_xmit()
    1751 tp->cur_tx++; in rtl8139_start_xmit()
    1753 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) in rtl8139_start_xmit()
    1774 tx_left = tp->cur_tx - dirty_tx; in rtl8139_tx_interrupt()
    1819 if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { in rtl8139_tx_interrupt()
    1821 dirty_tx, tp->cur_tx); in rtl8139_tx_interrupt()
|
D | r8169.c |
    77 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
    783 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ member
    4961 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; in rtl8169_init_ring_indexes()
    6843 tp->cur_tx = tp->dirty_tx = 0; in rtl8169_tx_clear()
    6884 entry = tp->cur_tx; in rtl8169_xmit_frags()
    6923 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); in rtl8169_xmit_frags()
    7112 unsigned int entry = tp->cur_tx % NUM_TX_DESC; in rtl8169_start_xmit()
    7172 tp->cur_tx += frags + 1; in rtl8169_start_xmit()
    7264 tx_left = tp->cur_tx - dirty_tx; in rtl_tx()
    7315 if (tp->cur_tx != dirty_tx) { in rtl_tx()
|
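The expression at r8169.c line 77 — dirty_tx + NUM_TX_DESC - cur_tx — is the free-slot count that the transmit path consults before accepting a fragmented skb (line 7172 then advances cur_tx by frags + 1). Here is a sketch of how such a gate can work; the exact threshold r8169 uses is not visible in the listing, so the >= nr_frags + 1 test below is an assumption, as is the ring size:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC 64U		/* illustrative ring size */

static unsigned int cur_tx, dirty_tx;	/* free-running, as in r8169.c */

/* Free descriptors, computed as at r8169.c line 77. */
static unsigned int tx_slots_avail(void)
{
	return dirty_tx + NUM_TX_DESC - cur_tx;
}

/* Assumed gate: the head descriptor plus one per fragment must fit. */
static bool frags_ready_for(unsigned int nr_frags)
{
	return tx_slots_avail() >= nr_frags + 1;
}

int main(void)
{
	cur_tx = 130;
	dirty_tx = 70;		/* 60 in flight, 4 slots free */
	printf("3 frags fit: %s\n", frags_ready_for(3) ? "yes" : "no");
	printf("5 frags fit: %s\n", frags_ready_for(5) ? "yes" : "no");
	return 0;
}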
/linux-4.4.14/drivers/net/ethernet/sis/ |
D | sis190.c |
    272 u32 cur_tx; member
    695 pending = tp->cur_tx - dirty_tx; in sis190_tx_interrupt()
    808 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; in sis190_init_ring_indexes()
    1122 tp->cur_tx = tp->dirty_tx = 0; in sis190_tx_clear()
    1192 entry = tp->cur_tx % NUM_TX_DESC; in sis190_start_xmit()
    1228 tp->cur_tx++; in sis190_start_xmit()
    1235 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { in sis190_start_xmit()
|
D | sis900.c |
    180 unsigned int cur_tx, dirty_tx; member
    1131 sis_priv->dirty_tx = sis_priv->cur_tx = 0; in sis900_init_tx_ring()
    1558 sis_priv->dirty_tx = sis_priv->cur_tx = 0; in sis900_tx_timeout()
    1610 entry = sis_priv->cur_tx % NUM_TX_DESC; in sis900_start_xmit()
    1627 sis_priv->cur_tx ++; in sis900_start_xmit()
    1628 index_cur_tx = sis_priv->cur_tx; in sis900_start_xmit()
    1892 for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) { in sis900_finish_xmit()
    1940 sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) { in sis900_finish_xmit()
|
/linux-4.4.14/drivers/net/ethernet/natsemi/ |
D | sonic.c | 291 int entry = lp->cur_tx; in sonic_interrupt() 341 lp->cur_tx = entry; in sonic_interrupt() 685 lp->cur_tx = lp->next_tx = 0; in sonic_init()
|
D | natsemi.c |
    559 unsigned int cur_tx, dirty_tx; member
    1973 np->dirty_tx = np->cur_tx = 0; in init_ring()
    2079 np->dirty_tx = np->cur_tx = 0; in reinit_ring()
    2097 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
    2118 np->cur_tx++; in start_tx()
    2119 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { in start_tx()
    2121 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) in start_tx()
    2134 dev->name, np->cur_tx, entry); in start_tx()
    2143 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in netdev_tx_done()
    2176 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in netdev_tx_done()
    [all …]
|
D | sonic.h | 317 unsigned int cur_tx; /* first unacked transmit packet */ member
|
/linux-4.4.14/drivers/net/wireless/ |
D | adm8211.c |
    314 for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { in adm8211_interrupt_tci()
    347 if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) in adm8211_interrupt_tci()
    1468 priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; in adm8211_init_rings()
    1631 if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) in adm8211_tx_raw()
    1636 if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) in adm8211_tx_raw()
    1639 entry = priv->cur_tx % priv->tx_ring_size; in adm8211_tx_raw()
    1654 priv->cur_tx++; in adm8211_tx_raw()
|
D | adm8211.h | 547 unsigned int cur_tx, dirty_tx, cur_rx; member
|
/linux-4.4.14/drivers/net/ethernet/freescale/fs_enet/ |
D | fs_enet-main.c |
    398 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; in fs_init_bds()
    556 bdp = fep->cur_tx; in fs_enet_start_xmit()
    630 fep->cur_tx = bdp; in fs_enet_start_xmit()
    662 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); in fs_timeout()
|
D | fs_enet.h | 142 cbd_t __iomem *cur_tx; member
|
/linux-4.4.14/drivers/net/ethernet/renesas/ |
D | ravb_main.c |
    232 priv->cur_tx[q] = 0; in ravb_ring_format()
    433 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { in ravb_tx_free()
    1073 data[i++] = priv->cur_tx[q]; in ravb_get_ethtool_stats()
    1316 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * in ravb_start_xmit()
    1324 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); in ravb_start_xmit()
    1381 priv->cur_tx[q] += NUM_TX_DESC; in ravb_start_xmit()
    1382 if (priv->cur_tx[q] - priv->dirty_tx[q] > in ravb_start_xmit()
|
D | sh_eth.h | 514 u32 cur_tx, dirty_tx; member
|
D | sh_eth.c |
    1173 mdp->cur_tx = 0; in sh_eth_ring_format()
    1421 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_txfree()
    1695 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
    2198 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
    2389 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
    2402 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
    2423 mdp->cur_tx++; in sh_eth_start_xmit()
|
D | ravb.h | 802 u32 cur_tx[NUM_TX_QUEUE]; member
|
/linux-4.4.14/drivers/net/ethernet/freescale/ |
D | fec_main.c |
    296 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; in fec_enet_get_free_txdesc_num()
    336 bdp == txq->cur_tx ? 'S' : ' ', in fec_dump()
    373 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
    444 bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
    483 bdp = txq->cur_tx; in fec_enet_txq_submit_skb()
    566 txq->cur_tx = bdp; in fec_enet_txq_submit_skb()
    697 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_tso()
    760 txq->cur_tx = bdp; in fec_enet_txq_submit_tso()
    842 txq->cur_tx = bdp; in fec_enet_bd_init()
    1221 while (bdp != READ_ONCE(txq->cur_tx)) { in fec_enet_tx_queue()
    [all …]
|
D | fec.h | 446 struct bufdesc *cur_tx; member
|
D | gianfar.h | 995 struct txbd8 *cur_tx; member
|
D | gianfar.c | 186 tx_queue->cur_tx = tx_queue->tx_bd_base; in gfar_init_bds() 2392 txbdp = txbdp_start = tx_queue->cur_tx; in gfar_start_xmit() 2524 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); in gfar_start_xmit()
|
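The Freescale drivers above (fec, gianfar, fs_enet) keep cur_tx as a pointer into the buffer-descriptor ring rather than an integer index, and advance it with an explicit wrap back to the base — gianfar does this through its next_txbd() helper (line 2524 above). A sketch of that style; the descriptor layout and the helper's simplified signature are invented for the example:

#include <stdio.h>

#define TX_RING_SIZE 8		/* illustrative */

struct bufdesc {		/* stand-in for the real buffer descriptor */
	unsigned short status;
	unsigned short length;
	unsigned int bufaddr;
};

static struct bufdesc tx_ring[TX_RING_SIZE];
static struct bufdesc *const tx_bd_base = tx_ring;
static struct bufdesc *cur_tx = tx_ring;	/* next descriptor to fill */

/* Advance a descriptor pointer, wrapping at the end of the ring. */
static struct bufdesc *next_txbd(struct bufdesc *bdp)
{
	return (bdp == &tx_bd_base[TX_RING_SIZE - 1]) ? tx_bd_base : bdp + 1;
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		printf("fill descriptor index %ld\n", (long)(cur_tx - tx_bd_base));
		cur_tx = next_txbd(cur_tx);
	}
	return 0;
}

fec_enet_get_free_txdesc_num() (line 296 above) recovers a count from a pointer difference divided by the descriptor size, which is why the pointer and integer-index styles end up doing the same bookkeeping.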
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2.c | 367 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start; in c2_tx_clean() 640 c2_port->tx_ring.start + c2dev->cur_tx; in c2_up() 1062 c2dev->cur_tx = 0; in c2_probe()
|
D | c2.h | 295 unsigned int cur_tx; member
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | chip.c |
    8146 u64 cur_tx; in update_synth_timer() local
    8166 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in update_synth_timer()
    8171 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in update_synth_timer()
    8173 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in update_synth_timer()
    8182 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in update_synth_timer()
|