TX_RING_SIZE 306 drivers/net/ethernet/3com/3c515.c struct boom_tx_desc tx_ring[TX_RING_SIZE];
TX_RING_SIZE 309 drivers/net/ethernet/3com/3c515.c struct sk_buff *tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 845 drivers/net/ethernet/3com/3c515.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 983 drivers/net/ethernet/3com/3c515.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1013 drivers/net/ethernet/3com/3c515.c int entry = vp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1021 drivers/net/ethernet/3com/3c515.c prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
TX_RING_SIZE 1051 drivers/net/ethernet/3com/3c515.c if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
TX_RING_SIZE 1176 drivers/net/ethernet/3com/3c515.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1187 drivers/net/ethernet/3com/3c515.c if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
TX_RING_SIZE 1458 drivers/net/ethernet/3com/3c515.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 603 drivers/net/ethernet/3com/3c59x.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 1211 drivers/net/ethernet/3com/3c59x.c + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 1476 drivers/net/ethernet/3com/3c59x.c sizeof(struct boom_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 1683 drivers/net/ethernet/3com/3c59x.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1913 drivers/net/ethernet/3com/3c59x.c iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
TX_RING_SIZE 1915 drivers/net/ethernet/3com/3c59x.c if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
TX_RING_SIZE 2114 drivers/net/ethernet/3com/3c59x.c int entry = vp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 2116 drivers/net/ethernet/3com/3c59x.c struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
TX_RING_SIZE 2135 drivers/net/ethernet/3com/3c59x.c if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
TX_RING_SIZE 2223 drivers/net/ethernet/3com/3c59x.c if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
TX_RING_SIZE 2420 drivers/net/ethernet/3com/3c59x.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 2459 drivers/net/ethernet/3com/3c59x.c if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
TX_RING_SIZE 2762 drivers/net/ethernet/3com/3c59x.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 2798 drivers/net/ethernet/3com/3c59x.c vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
TX_RING_SIZE 2799 drivers/net/ethernet/3com/3c59x.c vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
TX_RING_SIZE 2802 drivers/net/ethernet/3com/3c59x.c &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
TX_RING_SIZE 2804 drivers/net/ethernet/3com/3c59x.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 3284 drivers/net/ethernet/3com/3c59x.c sizeof(struct boom_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 539 drivers/net/ethernet/adaptec/starfire.c struct tx_ring_info tx_info[TX_RING_SIZE];
TX_RING_SIZE 901 drivers/net/ethernet/adaptec/starfire.c tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
TX_RING_SIZE 1182 drivers/net/ethernet/adaptec/starfire.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1199 drivers/net/ethernet/adaptec/starfire.c if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
TX_RING_SIZE 1212 drivers/net/ethernet/adaptec/starfire.c entry = np->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1220 drivers/net/ethernet/adaptec/starfire.c if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
TX_RING_SIZE 1258 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
TX_RING_SIZE 1267 drivers/net/ethernet/adaptec/starfire.c if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
TX_RING_SIZE 1280 drivers/net/ethernet/adaptec/starfire.c if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
TX_RING_SIZE 1286 drivers/net/ethernet/adaptec/starfire.c entry = prev_tx % TX_RING_SIZE;
TX_RING_SIZE 1294 drivers/net/ethernet/adaptec/starfire.c entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
TX_RING_SIZE 1380 drivers/net/ethernet/adaptec/starfire.c entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
TX_RING_SIZE 1401 drivers/net/ethernet/adaptec/starfire.c (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
TX_RING_SIZE 1988 drivers/net/ethernet/adaptec/starfire.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 104 drivers/net/ethernet/amd/7990.c for (t = 0; t < TX_RING_SIZE; t++) { \
TX_RING_SIZE 41 drivers/net/ethernet/amd/7990.h #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 87 drivers/net/ethernet/amd/7990.h volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
TX_RING_SIZE 90 drivers/net/ethernet/amd/7990.h volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
TX_RING_SIZE 75 drivers/net/ethernet/amd/a2065.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 97 drivers/net/ethernet/amd/a2065.c struct lance_tx_desc btx_ring[TX_RING_SIZE];
TX_RING_SIZE 100 drivers/net/ethernet/amd/a2065.c char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
TX_RING_SIZE 88 drivers/net/ethernet/amd/ariadne.c volatile struct TDRE *tx_ring[TX_RING_SIZE];
TX_RING_SIZE 90 drivers/net/ethernet/amd/ariadne.c volatile u_short *tx_buff[TX_RING_SIZE];
TX_RING_SIZE 100 drivers/net/ethernet/amd/ariadne.c struct TDRE tx_ring[TX_RING_SIZE];
TX_RING_SIZE 102 drivers/net/ethernet/amd/ariadne.c u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
TX_RING_SIZE 129 drivers/net/ethernet/amd/ariadne.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 309 drivers/net/ethernet/amd/ariadne.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 345 drivers/net/ethernet/amd/ariadne.c if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
TX_RING_SIZE 349 drivers/net/ethernet/amd/ariadne.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 354 drivers/net/ethernet/amd/ariadne.c dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
TX_RING_SIZE 470 drivers/net/ethernet/amd/ariadne.c lance->RDP = swapw(((u_short)-TX_RING_SIZE));
TX_RING_SIZE 576 drivers/net/ethernet/amd/ariadne.c entry = priv->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 597 drivers/net/ethernet/amd/ariadne.c if ((priv->cur_tx >= TX_RING_SIZE) &&
TX_RING_SIZE 598 drivers/net/ethernet/amd/ariadne.c (priv->dirty_tx >= TX_RING_SIZE)) {
TX_RING_SIZE 603 drivers/net/ethernet/amd/ariadne.c priv->cur_tx -= TX_RING_SIZE;
TX_RING_SIZE 604 drivers/net/ethernet/amd/ariadne.c priv->dirty_tx -= TX_RING_SIZE;
TX_RING_SIZE 612 drivers/net/ethernet/amd/ariadne.c if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
TX_RING_SIZE 113 drivers/net/ethernet/amd/atarilance.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 157 drivers/net/ethernet/amd/atarilance.c struct lance_tx_head tx_head[TX_RING_SIZE];
TX_RING_SIZE 705 drivers/net/ethernet/amd/atarilance.c for( i = 0; i < TX_RING_SIZE; i++ ) {
TX_RING_SIZE 756 drivers/net/ethernet/amd/atarilance.c for( i = 0 ; i < TX_RING_SIZE; i++ )
TX_RING_SIZE 830 drivers/net/ethernet/amd/atarilance.c while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
TX_RING_SIZE 831 drivers/net/ethernet/amd/atarilance.c lp->cur_tx -= TX_RING_SIZE;
TX_RING_SIZE 832 drivers/net/ethernet/amd/atarilance.c lp->dirty_tx -= TX_RING_SIZE;
TX_RING_SIZE 921 drivers/net/ethernet/amd/atarilance.c if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
TX_RING_SIZE 925 drivers/net/ethernet/amd/atarilance.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 930 drivers/net/ethernet/amd/atarilance.c dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
TX_RING_SIZE 157 drivers/net/ethernet/amd/declance.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 223 drivers/net/ethernet/amd/declance.c struct lance_tx_desc btx_ring[TX_RING_SIZE];
TX_RING_SIZE 268 drivers/net/ethernet/amd/declance.c char *tx_buf_ptr_cpu[TX_RING_SIZE];
TX_RING_SIZE 272 drivers/net/ethernet/amd/declance.c uint tx_buf_ptr_lnc[TX_RING_SIZE];
TX_RING_SIZE 495 drivers/net/ethernet/amd/declance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1094 drivers/net/ethernet/amd/declance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1139 drivers/net/ethernet/amd/declance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1170 drivers/net/ethernet/amd/declance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 194 drivers/net/ethernet/amd/lance.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 238 drivers/net/ethernet/amd/lance.c struct lance_tx_head tx_ring[TX_RING_SIZE];
TX_RING_SIZE 242 drivers/net/ethernet/amd/lance.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 559 drivers/net/ethernet/amd/lance.c lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
TX_RING_SIZE 850 drivers/net/ethernet/amd/lance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 887 drivers/net/ethernet/amd/lance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 936 drivers/net/ethernet/amd/lance.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1011 drivers/net/ethernet/amd/lance.c if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
TX_RING_SIZE 1094 drivers/net/ethernet/amd/lance.c if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
TX_RING_SIZE 1098 drivers/net/ethernet/amd/lance.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 1104 drivers/net/ethernet/amd/lance.c dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
TX_RING_SIZE 1859 drivers/net/ethernet/amd/pcnet32.c lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
TX_RING_SIZE 96 drivers/net/ethernet/amd/sun3lance.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 142 drivers/net/ethernet/amd/sun3lance.c struct lance_tx_head tx_head[TX_RING_SIZE];
TX_RING_SIZE 145 drivers/net/ethernet/amd/sun3lance.c char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
TX_RING_SIZE 465 drivers/net/ethernet/amd/sun3lance.c for( i = 0; i < TX_RING_SIZE; i++ ) {
TX_RING_SIZE 553 drivers/net/ethernet/amd/sun3lance.c for( i = 0 ; i < TX_RING_SIZE; i++ )
TX_RING_SIZE 182 drivers/net/ethernet/amd/sunlance.c #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
TX_RING_SIZE 228 drivers/net/ethernet/amd/sunlance.c struct lance_tx_desc btx_ring[TX_RING_SIZE];
TX_RING_SIZE 230 drivers/net/ethernet/amd/sunlance.c u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
TX_RING_SIZE 345 drivers/net/ethernet/amd/sunlance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 401 drivers/net/ethernet/amd/sunlance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 526 drivers/net/ethernet/dec/tulip/interrupt.c int maxtx = TX_RING_SIZE;
TX_RING_SIZE 527 drivers/net/ethernet/dec/tulip/interrupt.c int maxoi = TX_RING_SIZE;
TX_RING_SIZE 590 drivers/net/ethernet/dec/tulip/interrupt.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 644 drivers/net/ethernet/dec/tulip/interrupt.c if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
TX_RING_SIZE 648 drivers/net/ethernet/dec/tulip/interrupt.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 652 drivers/net/ethernet/dec/tulip/interrupt.c if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
TX_RING_SIZE 409 drivers/net/ethernet/dec/tulip/tulip.h struct ring_info tx_buffers[TX_RING_SIZE];
TX_RING_SIZE 598 drivers/net/ethernet/dec/tulip/tulip_core.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 654 drivers/net/ethernet/dec/tulip/tulip_core.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 675 drivers/net/ethernet/dec/tulip/tulip_core.c entry = tp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 683 drivers/net/ethernet/dec/tulip/tulip_core.c if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
TX_RING_SIZE 685 drivers/net/ethernet/dec/tulip/tulip_core.c } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
TX_RING_SIZE 687 drivers/net/ethernet/dec/tulip/tulip_core.c } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
TX_RING_SIZE 693 drivers/net/ethernet/dec/tulip/tulip_core.c if (entry == TX_RING_SIZE-1)
TX_RING_SIZE 718 drivers/net/ethernet/dec/tulip/tulip_core.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 815 drivers/net/ethernet/dec/tulip/tulip_core.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1139 drivers/net/ethernet/dec/tulip/tulip_core.c if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
TX_RING_SIZE 1147 drivers/net/ethernet/dec/tulip/tulip_core.c entry = tp->cur_tx++ % TX_RING_SIZE;
TX_RING_SIZE 1154 drivers/net/ethernet/dec/tulip/tulip_core.c (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
TX_RING_SIZE 1158 drivers/net/ethernet/dec/tulip/tulip_core.c entry = tp->cur_tx++ % TX_RING_SIZE;
TX_RING_SIZE 1168 drivers/net/ethernet/dec/tulip/tulip_core.c if (entry == TX_RING_SIZE-1)
TX_RING_SIZE 1176 drivers/net/ethernet/dec/tulip/tulip_core.c if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
TX_RING_SIZE 1444 drivers/net/ethernet/dec/tulip/tulip_core.c sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 1779 drivers/net/ethernet/dec/tulip/tulip_core.c sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 1927 drivers/net/ethernet/dec/tulip/tulip_core.c sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
TX_RING_SIZE 298 drivers/net/ethernet/dec/tulip/winbond-840.c dma_addr_t tx_addr[TX_RING_SIZE];
TX_RING_SIZE 303 drivers/net/ethernet/dec/tulip/winbond-840.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 826 drivers/net/ethernet/dec/tulip/winbond-840.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 854 drivers/net/ethernet/dec/tulip/winbond-840.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 940 drivers/net/ethernet/dec/tulip/winbond-840.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 979 drivers/net/ethernet/dec/tulip/winbond-840.c sizeof(struct w840_tx_desc)*TX_RING_SIZE,
TX_RING_SIZE 991 drivers/net/ethernet/dec/tulip/winbond-840.c sizeof(struct w840_tx_desc)*TX_RING_SIZE,
TX_RING_SIZE 1005 drivers/net/ethernet/dec/tulip/winbond-840.c entry = np->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1020 drivers/net/ethernet/dec/tulip/winbond-840.c if(entry == TX_RING_SIZE-1)
TX_RING_SIZE 1062 drivers/net/ethernet/dec/tulip/winbond-840.c int entry = np->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1510 drivers/net/ethernet/dec/tulip/winbond-840.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 219 drivers/net/ethernet/dlink/dl2k.c else if (tx_coalesce > TX_RING_SIZE-1)
TX_RING_SIZE 220 drivers/net/ethernet/dlink/dl2k.c tx_coalesce = TX_RING_SIZE - 1;
TX_RING_SIZE 455 drivers/net/ethernet/dlink/dl2k.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 475 drivers/net/ethernet/dlink/dl2k.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 492 drivers/net/ethernet/dlink/dl2k.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 495 drivers/net/ethernet/dlink/dl2k.c ((i + 1) % TX_RING_SIZE) *
TX_RING_SIZE 724 drivers/net/ethernet/dlink/dl2k.c entry = np->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 761 drivers/net/ethernet/dlink/dl2k.c np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
TX_RING_SIZE 762 drivers/net/ethernet/dlink/dl2k.c if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
TX_RING_SIZE 823 drivers/net/ethernet/dlink/dl2k.c int entry = np->old_tx % TX_RING_SIZE;
TX_RING_SIZE 848 drivers/net/ethernet/dlink/dl2k.c entry = (entry + 1) % TX_RING_SIZE;
TX_RING_SIZE 861 drivers/net/ethernet/dlink/dl2k.c ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
TX_RING_SIZE 36 drivers/net/ethernet/dlink/dl2k.h #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
TX_RING_SIZE 38 drivers/net/ethernet/dlink/dl2k.h #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
TX_RING_SIZE 369 drivers/net/ethernet/dlink/dl2k.h struct sk_buff *tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 69 drivers/net/ethernet/dlink/sundance.c #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
TX_RING_SIZE 72 drivers/net/ethernet/dlink/sundance.c #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
TX_RING_SIZE 374 drivers/net/ethernet/dlink/sundance.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 988 drivers/net/ethernet/dlink/sundance.c for (i=0; i<TX_RING_SIZE; i++) {
TX_RING_SIZE 1001 drivers/net/ethernet/dlink/sundance.c np->cur_tx, np->cur_tx % TX_RING_SIZE,
TX_RING_SIZE 1002 drivers/net/ethernet/dlink/sundance.c np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
TX_RING_SIZE 1066 drivers/net/ethernet/dlink/sundance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1076 drivers/net/ethernet/dlink/sundance.c unsigned head = np->cur_task % TX_RING_SIZE;
TX_RING_SIZE 1078 drivers/net/ethernet/dlink/sundance.c &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
TX_RING_SIZE 1082 drivers/net/ethernet/dlink/sundance.c int entry = np->cur_task % TX_RING_SIZE;
TX_RING_SIZE 1106 drivers/net/ethernet/dlink/sundance.c entry = np->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1160 drivers/net/ethernet/dlink/sundance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1271 drivers/net/ethernet/dlink/sundance.c int entry = np->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1281 drivers/net/ethernet/dlink/sundance.c TX_RING_SIZE)
TX_RING_SIZE 1297 drivers/net/ethernet/dlink/sundance.c int entry = np->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1884 drivers/net/ethernet/dlink/sundance.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1915 drivers/net/ethernet/dlink/sundance.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 60 drivers/net/ethernet/fealnx.c #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
TX_RING_SIZE 1212 drivers/net/ethernet/fealnx.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1278 drivers/net/ethernet/fealnx.c np->free_tx_count = TX_RING_SIZE;
TX_RING_SIZE 1280 drivers/net/ethernet/fealnx.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1383 drivers/net/ethernet/fealnx.c np->free_tx_count = TX_RING_SIZE;
TX_RING_SIZE 1385 drivers/net/ethernet/fealnx.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1401 drivers/net/ethernet/fealnx.c np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
TX_RING_SIZE 1402 drivers/net/ethernet/fealnx.c np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
TX_RING_SIZE 1921 drivers/net/ethernet/fealnx.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 475 drivers/net/ethernet/freescale/fec.h unsigned char *tx_bounce[TX_RING_SIZE];
TX_RING_SIZE 476 drivers/net/ethernet/freescale/fec.h struct sk_buff *tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 2818 drivers/net/ethernet/freescale/fec_main.c txq->bd.ring_size = TX_RING_SIZE;
TX_RING_SIZE 97 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c int tx_left = TX_RING_SIZE;
TX_RING_SIZE 949 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c fpi->tx_ring = TX_RING_SIZE;
TX_RING_SIZE 334 drivers/net/ethernet/i825xx/82596.c struct tx_cmd tx_cmds[TX_RING_SIZE];
TX_RING_SIZE 335 drivers/net/ethernet/i825xx/82596.c struct i596_tbd tbds[TX_RING_SIZE];
TX_RING_SIZE 372 drivers/net/ethernet/i825xx/82596.c static int max_cmd_backlog = TX_RING_SIZE-1;
TX_RING_SIZE 1077 drivers/net/ethernet/i825xx/82596.c if (++lp->next_tx_cmd == TX_RING_SIZE)
TX_RING_SIZE 310 drivers/net/ethernet/i825xx/lib82596.c struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
TX_RING_SIZE 311 drivers/net/ethernet/i825xx/lib82596.c struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
TX_RING_SIZE 362 drivers/net/ethernet/i825xx/lib82596.c static int max_cmd_backlog = TX_RING_SIZE-1;
TX_RING_SIZE 999 drivers/net/ethernet/i825xx/lib82596.c if (++lp->next_tx_cmd == TX_RING_SIZE)
TX_RING_SIZE 547 drivers/net/ethernet/natsemi/natsemi.c struct sk_buff *tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 548 drivers/net/ethernet/natsemi/natsemi.c dma_addr_t tx_dma[TX_RING_SIZE];
TX_RING_SIZE 1868 drivers/net/ethernet/natsemi/natsemi.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1920 drivers/net/ethernet/natsemi/natsemi.c sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
TX_RING_SIZE 1978 drivers/net/ethernet/natsemi/natsemi.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1982 drivers/net/ethernet/natsemi/natsemi.c *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
TX_RING_SIZE 2014 drivers/net/ethernet/natsemi/natsemi.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 2056 drivers/net/ethernet/natsemi/natsemi.c sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
TX_RING_SIZE 2084 drivers/net/ethernet/natsemi/natsemi.c for (i=0;i<TX_RING_SIZE;i++)
TX_RING_SIZE 2101 drivers/net/ethernet/natsemi/natsemi.c entry = np->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 2148 drivers/net/ethernet/natsemi/natsemi.c int entry = np->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 121 drivers/net/ethernet/packetengines/hamachi.c #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
TX_RING_SIZE 487 drivers/net/ethernet/packetengines/hamachi.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 996 drivers/net/ethernet/packetengines/hamachi.c int entry = hmp->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1011 drivers/net/ethernet/packetengines/hamachi.c if (entry >= TX_RING_SIZE-1)
TX_RING_SIZE 1012 drivers/net/ethernet/packetengines/hamachi.c hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
TX_RING_SIZE 1061 drivers/net/ethernet/packetengines/hamachi.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1084 drivers/net/ethernet/packetengines/hamachi.c for (i = 0; i < TX_RING_SIZE; i++){
TX_RING_SIZE 1087 drivers/net/ethernet/packetengines/hamachi.c if (i >= TX_RING_SIZE - 1)
TX_RING_SIZE 1195 drivers/net/ethernet/packetengines/hamachi.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1200 drivers/net/ethernet/packetengines/hamachi.c hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
TX_RING_SIZE 1232 drivers/net/ethernet/packetengines/hamachi.c entry = hmp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1248 drivers/net/ethernet/packetengines/hamachi.c if (entry >= TX_RING_SIZE-1) /* Wrap ring */
TX_RING_SIZE 1273 drivers/net/ethernet/packetengines/hamachi.c if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
TX_RING_SIZE 1328 drivers/net/ethernet/packetengines/hamachi.c int entry = hmp->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1344 drivers/net/ethernet/packetengines/hamachi.c if (entry >= TX_RING_SIZE-1)
TX_RING_SIZE 1345 drivers/net/ethernet/packetengines/hamachi.c hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
TX_RING_SIZE 1349 drivers/net/ethernet/packetengines/hamachi.c if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
TX_RING_SIZE 1673 drivers/net/ethernet/packetengines/hamachi.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1715 drivers/net/ethernet/packetengines/hamachi.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 76 drivers/net/ethernet/packetengines/yellowfin.c #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
TX_RING_SIZE 77 drivers/net/ethernet/packetengines/yellowfin.c #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
TX_RING_SIZE 312 drivers/net/ethernet/packetengines/yellowfin.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 698 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 757 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 761 drivers/net/ethernet/packetengines/yellowfin.c ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
TX_RING_SIZE 768 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 794 drivers/net/ethernet/packetengines/yellowfin.c ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
TX_RING_SIZE 817 drivers/net/ethernet/packetengines/yellowfin.c entry = yp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 837 drivers/net/ethernet/packetengines/yellowfin.c if (entry >= TX_RING_SIZE-1) {
TX_RING_SIZE 840 drivers/net/ethernet/packetengines/yellowfin.c yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
TX_RING_SIZE 857 drivers/net/ethernet/packetengines/yellowfin.c unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 917 drivers/net/ethernet/packetengines/yellowfin.c int entry = yp->dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 944 drivers/net/ethernet/packetengines/yellowfin.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 993 drivers/net/ethernet/packetengines/yellowfin.c if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
TX_RING_SIZE 996 drivers/net/ethernet/packetengines/yellowfin.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 1008 drivers/net/ethernet/packetengines/yellowfin.c yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
TX_RING_SIZE 1216 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE*2; i++)
TX_RING_SIZE 1222 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1260 drivers/net/ethernet/packetengines/yellowfin.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 465 drivers/net/ethernet/pasemi/pasemi_mac.c ring->size = TX_RING_SIZE;
TX_RING_SIZE 466 drivers/net/ethernet/pasemi/pasemi_mac.c ring->ring_info = kcalloc(TX_RING_SIZE,
TX_RING_SIZE 473 drivers/net/ethernet/pasemi/pasemi_mac.c if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
TX_RING_SIZE 479 drivers/net/ethernet/pasemi/pasemi_mac.c val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);
TX_RING_SIZE 521 drivers/net/ethernet/pasemi/pasemi_mac.c limit += TX_RING_SIZE;
TX_RING_SIZE 524 drivers/net/ethernet/pasemi/pasemi_mac.c info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
TX_RING_SIZE 529 drivers/net/ethernet/pasemi/pasemi_mac.c (TX_RING_SIZE-1)].dma;
TX_RING_SIZE 840 drivers/net/ethernet/pasemi/pasemi_mac.c ring_limit += TX_RING_SIZE;
TX_RING_SIZE 888 drivers/net/ethernet/pasemi/pasemi_mac.c txring->next_to_clean = i & (TX_RING_SIZE-1);
TX_RING_SIZE 1518 drivers/net/ethernet/pasemi/pasemi_mac.c txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);
TX_RING_SIZE 20 drivers/net/ethernet/pasemi/pasemi_mac.h #define CS_RING_SIZE (TX_RING_SIZE*2)
TX_RING_SIZE 94 drivers/net/ethernet/pasemi/pasemi_mac.h #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
TX_RING_SIZE 95 drivers/net/ethernet/pasemi/pasemi_mac.h #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
TX_RING_SIZE 76 drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c ering->tx_max_pending = TX_RING_SIZE/2;
TX_RING_SIZE 581 drivers/net/ethernet/qlogic/qede/qede.h #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
TX_RING_SIZE 3266 drivers/net/ethernet/renesas/sh_eth.c mdp->num_tx_ring = TX_RING_SIZE;
TX_RING_SIZE 1254 drivers/net/ethernet/sgi/ioc3-eth.c ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
TX_RING_SIZE 1319 drivers/net/ethernet/sgi/ioc3-eth.c dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
TX_RING_SIZE 1340 drivers/net/ethernet/sgi/ioc3-eth.c dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
TX_RING_SIZE 56 drivers/net/ethernet/smsc/epic100.c #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
TX_RING_SIZE 256 drivers/net/ethernet/smsc/epic100.c struct sk_buff* tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 808 drivers/net/ethernet/smsc/epic100.c (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
TX_RING_SIZE 928 drivers/net/ethernet/smsc/epic100.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 954 drivers/net/ethernet/smsc/epic100.c entry = ep->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1023 drivers/net/ethernet/smsc/epic100.c int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 1045 drivers/net/ethernet/smsc/epic100.c if (cur_tx - dirty_tx > TX_RING_SIZE) {
TX_RING_SIZE 1048 drivers/net/ethernet/smsc/epic100.c dirty_tx += TX_RING_SIZE;
TX_RING_SIZE 1302 drivers/net/ethernet/smsc/epic100.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 504 drivers/net/ethernet/smsc/smsc9420.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 923 drivers/net/ethernet/smsc/smsc9420.c pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE;
TX_RING_SIZE 935 drivers/net/ethernet/smsc/smsc9420.c (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail);
TX_RING_SIZE 962 drivers/net/ethernet/smsc/smsc9420.c if (unlikely(index == (TX_RING_SIZE - 1)))
TX_RING_SIZE 970 drivers/net/ethernet/smsc/smsc9420.c pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE;
TX_RING_SIZE 1185 drivers/net/ethernet/smsc/smsc9420.c pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
TX_RING_SIZE 1192 drivers/net/ethernet/smsc/smsc9420.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1200 drivers/net/ethernet/smsc/smsc9420.c pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
TX_RING_SIZE 1571 drivers/net/ethernet/smsc/smsc9420.c sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
TX_RING_SIZE 1629 drivers/net/ethernet/smsc/smsc9420.c (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
TX_RING_SIZE 1662 drivers/net/ethernet/smsc/smsc9420.c (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
TX_RING_SIZE 204 drivers/net/ethernet/sun/sunbmac.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 249 drivers/net/ethernet/sun/sunbmac.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 255 drivers/net/ethernet/sun/sunbmac.h #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
TX_RING_SIZE 257 drivers/net/ethernet/sun/sunbmac.h #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
TX_RING_SIZE 261 drivers/net/ethernet/sun/sunbmac.h (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
TX_RING_SIZE 300 drivers/net/ethernet/sun/sunbmac.h struct sk_buff *tx_skbs[TX_RING_SIZE];
TX_RING_SIZE 665 drivers/net/ethernet/sun/sungem.c last &= (TX_RING_SIZE - 1);
TX_RING_SIZE 994 drivers/net/ethernet/sun/sungem.c if (!(entry & ((TX_RING_SIZE>>1)-1)))
TX_RING_SIZE 1600 drivers/net/ethernet/sun/sungem.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1609 drivers/net/ethernet/sun/sungem.c int ent = i & (TX_RING_SIZE - 1);
TX_RING_SIZE 1663 drivers/net/ethernet/sun/sungem.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 886 drivers/net/ethernet/sun/sungem.h #if TX_RING_SIZE == 32
TX_RING_SIZE 888 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 64
TX_RING_SIZE 890 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 128
TX_RING_SIZE 892 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 256
TX_RING_SIZE 894 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 512
TX_RING_SIZE 896 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 1024
TX_RING_SIZE 898 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 2048
TX_RING_SIZE 900 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 4096
TX_RING_SIZE 902 drivers/net/ethernet/sun/sungem.h #elif TX_RING_SIZE == 8192
TX_RING_SIZE 930 drivers/net/ethernet/sun/sungem.h #define NEXT_TX(N) (((N) + 1) & (TX_RING_SIZE - 1))
TX_RING_SIZE 935 drivers/net/ethernet/sun/sungem.h (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \
TX_RING_SIZE 943 drivers/net/ethernet/sun/sungem.h #if TX_RING_SIZE < 128
TX_RING_SIZE 946 drivers/net/ethernet/sun/sungem.h #define INIT_BLOCK_TX_RING_SIZE TX_RING_SIZE
TX_RING_SIZE 1015 drivers/net/ethernet/sun/sungem.h struct sk_buff *tx_skbs[TX_RING_SIZE];
TX_RING_SIZE 145 drivers/net/ethernet/sun/sunhme.c for (i = 0; i < TX_RING_SIZE; i+=4) {
TX_RING_SIZE 1217 drivers/net/ethernet/sun/sunhme.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1291 drivers/net/ethernet/sun/sunhme.c for (i = 0; i < TX_RING_SIZE; i++)
TX_RING_SIZE 1622 drivers/net/ethernet/sun/sunhme.c HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
TX_RING_SIZE 1624 drivers/net/ethernet/sun/sunhme.c hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
TX_RING_SIZE 1943 drivers/net/ethernet/sun/sunhme.c last &= (TX_RING_SIZE - 1);
TX_RING_SIZE 334 drivers/net/ethernet/sun/sunhme.h #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
TX_RING_SIZE 361 drivers/net/ethernet/sun/sunhme.h #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
TX_RING_SIZE 363 drivers/net/ethernet/sun/sunhme.h #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
TX_RING_SIZE 367 drivers/net/ethernet/sun/sunhme.h (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
TX_RING_SIZE 418 drivers/net/ethernet/sun/sunhme.h struct sk_buff *tx_skbs[TX_RING_SIZE];
TX_RING_SIZE 588 drivers/net/ethernet/sun/sunqe.c txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
TX_RING_SIZE 590 drivers/net/ethernet/sun/sunqe.c qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
TX_RING_SIZE 301 drivers/net/ethernet/sun/sunqe.h (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
TX_RING_SIZE 326 drivers/net/ethernet/sun/sunqe.h u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
TX_RING_SIZE 80 drivers/net/ethernet/via/via-rhine.c #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
TX_RING_SIZE 452 drivers/net/ethernet/via/via-rhine.c struct sk_buff *tx_skbuff[TX_RING_SIZE];
TX_RING_SIZE 453 drivers/net/ethernet/via/via-rhine.c dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
TX_RING_SIZE 456 drivers/net/ethernet/via/via-rhine.c unsigned char *tx_buf[TX_RING_SIZE];
TX_RING_SIZE 1161 drivers/net/ethernet/via/via-rhine.c TX_RING_SIZE * sizeof(struct tx_desc),
TX_RING_SIZE 1170 drivers/net/ethernet/via/via-rhine.c PKT_BUF_SZ * TX_RING_SIZE,
TX_RING_SIZE 1176 drivers/net/ethernet/via/via-rhine.c TX_RING_SIZE * sizeof(struct tx_desc),
TX_RING_SIZE 1197 drivers/net/ethernet/via/via-rhine.c TX_RING_SIZE * sizeof(struct tx_desc),
TX_RING_SIZE 1202 drivers/net/ethernet/via/via-rhine.c dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
TX_RING_SIZE 1323 drivers/net/ethernet/via/via-rhine.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1343 drivers/net/ethernet/via/via-rhine.c for (i = 0; i < TX_RING_SIZE; i++) {
TX_RING_SIZE 1793 drivers/net/ethernet/via/via-rhine.c entry = rp->cur_tx % TX_RING_SIZE;
TX_RING_SIZE 1943 drivers/net/ethernet/via/via-rhine.c unsigned int entry = dirty_tx % TX_RING_SIZE;
TX_RING_SIZE 2158 drivers/net/ethernet/via/via-rhine.c int entry = rp->dirty_tx % TX_RING_SIZE;
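
Most of the hits above are variations on a single idiom, visible in the 3c59x, tulip, sundance and natsemi snippets: the driver keeps two free-running counters, cur_tx (producer) and dirty_tx (consumer), indexes the descriptor ring with counter % TX_RING_SIZE, and treats cur_tx - dirty_tx as the fill level. The following is a minimal, self-contained sketch of that pattern; the ring_state struct, the function names and the tx_done flag are placeholders for illustration, not any one driver's actual definitions.

/* Minimal sketch of the shared cur_tx/dirty_tx TX-ring bookkeeping.
 * TX_RING_SIZE, the struct layout and the "done" flag are illustrative only. */
#define TX_RING_SIZE 16

struct ring_state {
	void *tx_skbuff[TX_RING_SIZE];   /* packet owned by each slot */
	int tx_done[TX_RING_SIZE];       /* stand-in for the hardware status bit */
	unsigned int cur_tx;             /* producer count, free-running */
	unsigned int dirty_tx;           /* consumer count, free-running */
};

/* start_xmit side: claim the next slot, or report the ring full. */
static int ring_queue(struct ring_state *r, void *skb)
{
	unsigned int entry = r->cur_tx % TX_RING_SIZE;

	if (r->cur_tx - r->dirty_tx >= TX_RING_SIZE)
		return -1;               /* full: a real driver would stop the queue */

	r->tx_skbuff[entry] = skb;       /* a real driver also fills tx_ring[entry] */
	r->tx_done[entry] = 0;
	r->cur_tx++;
	return 0;
}

/* TX-complete side: reclaim finished slots until we catch up with cur_tx. */
static void ring_clean(struct ring_state *r)
{
	while (r->dirty_tx != r->cur_tx) {
		unsigned int entry = r->dirty_tx % TX_RING_SIZE;

		if (!r->tx_done[entry])  /* hardware not finished with this slot */
			break;
		r->tx_skbuff[entry] = 0; /* a real driver unmaps and frees the skb */
		r->dirty_tx++;
	}
}

Leaving the counters free-running keeps the fill test a simple subtraction, which is why the 3c59x, tulip and epic100 hits compare cur_tx - dirty_tx rather than the masked indices. Drivers that instead rewind the counters have to resynchronize them explicitly, as in the atarilance (lines 830-832) and ariadne (lines 597-604) snippets that subtract TX_RING_SIZE from both counters at once.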
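
The Sun and PA Semi drivers (sunbmac.h, sungem.h, sunhme.h and pasemi_mac.h above) avoid the division entirely by requiring TX_RING_SIZE to be a power of two, hence the #if TX_RING_SIZE == 32 ... 8192 ladder in sungem.h, and wrap with a mask instead of a modulo. The NEXT_TX/PREV_TX macros below are quoted from the listing; the free-slot helper is a hedged reconstruction in the style of the TX_BUFFS_AVAIL macros whose first branch appears at sunbmac.h 261, sungem.h 935 and sunhme.h 367 (the second branch and the function form are filled in for illustration, not quoted from any driver).

/* Power-of-two ring: wrap with a mask instead of '%'. Self-contained sketch. */
#define TX_RING_SIZE 64                              /* must be a power of two */
#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))

/* Free descriptors between consumer (tx_old) and producer (tx_new), keeping
 * one slot unused so a full ring is distinguishable from an empty one. */
static inline int tx_buffs_avail(int tx_old, int tx_new)
{
	return tx_old <= tx_new ?
		tx_old + (TX_RING_SIZE - 1) - tx_new :
		tx_old - tx_new - 1;
}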