
Searched refs: MAX_SKB_FRAGS (Results 1 – 90 of 90) sorted by relevance

/linux-4.1.27/drivers/net/xen-netback/
common.h 92 #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
206 #define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
netback.c 830 if (shinfo->nr_frags > MAX_SKB_FRAGS) { in xenvif_get_requests()
831 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; in xenvif_get_requests()
832 BUG_ON(frag_overflow > MAX_SKB_FRAGS); in xenvif_get_requests()
833 shinfo->nr_frags = MAX_SKB_FRAGS; in xenvif_get_requests()
1345 skb_frag_t frags[MAX_SKB_FRAGS]; in xenvif_handle_frag_list()
1364 BUG_ON(i >= MAX_SKB_FRAGS); in xenvif_handle_frag_list()
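The xenvif_get_requests() hits above show the clamp-and-carry pattern netback uses when a guest transmit request spans more slots than MAX_SKB_FRAGS: the primary skb keeps at most MAX_SKB_FRAGS fragments and the excess count is remembered so it can be carried in a follow-up skb. A minimal sketch of just that bookkeeping, with illustrative names only (not the actual netback helpers):

    /* Illustrative sketch of the clamp visible in xenvif_get_requests() above. */
    #include <linux/skbuff.h>

    static unsigned int clamp_tx_frags(struct skb_shared_info *shinfo)
    {
            unsigned int frag_overflow = 0;

            if (shinfo->nr_frags > MAX_SKB_FRAGS) {
                    frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
                    /* Excess fragments are carried on a second, frag-list skb. */
                    shinfo->nr_frags = MAX_SKB_FRAGS;
            }
            return frag_overflow;
    }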
/linux-4.1.27/Documentation/networking/
driver.txt 32 if (TX_BUFFS_AVAIL(dp) <= (MAX_SKB_FRAGS + 1))
44 TX_BUFFS_AVAIL(dp) > (MAX_SKB_FRAGS + 1))
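The driver.txt lines above come from the queue flow-control discussion: a driver stops its TX queue while fewer than MAX_SKB_FRAGS + 1 descriptors remain (the worst case for one fully fragmented skb plus its linear head) and wakes it from the completion path once that headroom is back. A condensed sketch of the pattern, assuming a placeholder TX_BUFFS_AVAIL() macro and private struct rather than any real driver's API:

    /* Condensed from the flow-control pattern in Documentation/networking/driver.txt.
     * struct my_priv and TX_BUFFS_AVAIL() are placeholders for a driver's own
     * descriptor accounting, not real kernel APIs. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct my_priv {
            unsigned int tx_free;           /* TX descriptors currently available */
    };
    #define TX_BUFFS_AVAIL(dp)      ((dp)->tx_free)

    static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct my_priv *dp = netdev_priv(dev);

            /* Worst case per packet: MAX_SKB_FRAGS page frags plus the linear head. */
            if (TX_BUFFS_AVAIL(dp) <= MAX_SKB_FRAGS + 1) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* should not normally be reached */
            }

            /* ... map the skb and hand it to the hardware ... */
            dp->tx_free -= skb_shinfo(skb)->nr_frags + 1;

            if (TX_BUFFS_AVAIL(dp) <= MAX_SKB_FRAGS + 1)
                    netif_stop_queue(dev);
            return NETDEV_TX_OK;
    }

    /* TX completion path: wake only once a full worst-case packet fits again. */
    static void my_tx_clean(struct net_device *dev, struct my_priv *dp)
    {
            /* ... dp->tx_free replenished as descriptors are reclaimed ... */
            if (netif_queue_stopped(dev) &&
                TX_BUFFS_AVAIL(dp) > MAX_SKB_FRAGS + 1)
                    netif_wake_queue(dev);
    }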
/linux-4.1.27/drivers/net/
virtio_net.c 71 struct scatterlist sg[MAX_SKB_FRAGS + 2];
94 struct scatterlist sg[MAX_SKB_FRAGS + 2];
293 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { in page_to_skb()
380 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { in receive_mergeable()
543 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); in add_recvbuf_small()
561 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); in add_recvbuf_big()
564 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { in add_recvbuf_big()
595 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
896 sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); in xmit_skb()
952 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in start_xmit()
[all …]
xen-netfront.c 240 (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); in netfront_tx_slot_available()
548 if (unlikely(slots > MAX_SKB_FRAGS + 1)) { in xennet_start_xmit()
722 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); in xennet_get_responses()
834 if (shinfo->nr_frags == MAX_SKB_FRAGS) { in xennet_fill_frags()
840 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); in xennet_fill_frags()
macvtap.c 716 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) in macvtap_get_user()
tun.c 1095 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) in tun_get_user()
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c 211 init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; in ipoib_transport_dev_init()
223 for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) in ipoib_transport_dev_init()
ipoib.h 176 u64 mapping[MAX_SKB_FRAGS + 1];
363 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
/linux-4.1.27/net/core/
datagram.c 536 struct page *pages[MAX_SKB_FRAGS]; in zerocopy_sg_from_iter()
542 if (frag == MAX_SKB_FRAGS) in zerocopy_sg_from_iter()
546 MAX_SKB_FRAGS - frag, &start); in zerocopy_sg_from_iter()
skbuff.c 82 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
1829 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) in spd_fill_page()
1933 struct partial_page partial[MAX_SKB_FRAGS]; in skb_splice_bits()
1934 struct page *pages[MAX_SKB_FRAGS]; in skb_splice_bits()
1938 .nr_pages_max = MAX_SKB_FRAGS, in skb_splice_bits()
2246 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) in skb_zerocopy_headlen()
2670 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
2677 if (to == MAX_SKB_FRAGS) in skb_shift()
2934 if (frg_cnt >= MAX_SKB_FRAGS) in skb_append_datato_frags()
3154 MAX_SKB_FRAGS)) { in skb_segment()
[all …]
sysctl_net_core.c 29 static int max_skb_frags = MAX_SKB_FRAGS;
pktgen.c 2669 if (frags > MAX_SKB_FRAGS) in pktgen_finalize_skb()
2670 frags = MAX_SKB_FRAGS; in pktgen_finalize_skb()
/linux-4.1.27/include/scsi/
fc_frame.h 60 #define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1)
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
i40e_txrx.h 129 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/drivers/scsi/cxgbi/
libcxgbi.h 180 dma_addr_t addr[MAX_SKB_FRAGS + 1];
437 #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
libcxgbi.c 2102 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && in cxgbi_conn_alloc_pdu()
2194 if (tdata->nr_frags > MAX_SKB_FRAGS || in cxgbi_conn_init_pdu()
2195 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { in cxgbi_conn_init_pdu()
2355 unsigned int max_def = 512 * MAX_SKB_FRAGS; in cxgbi_conn_max_xmit_dlength()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
i40e_txrx.h 129 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/
adapter.h 152 struct page_frag frags[MAX_SKB_FRAGS];
sge.c 105 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
916 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; in write_sgl()
1171 dma_addr_t addr[MAX_SKB_FRAGS + 1]; in t4vf_eth_xmit()
1786 BUG_ON(frag >= MAX_SKB_FRAGS); in process_responses()
/linux-4.1.27/drivers/net/ethernet/pasemi/
pasemi_mac.c 559 dma_addr_t dmas[MAX_SKB_FRAGS+1]; in pasemi_mac_free_tx_resources()
861 #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
873 dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; in pasemi_mac_clean_tx()
1482 dma_addr_t map[MAX_SKB_FRAGS+1]; in pasemi_mac_start_tx()
1483 unsigned int map_size[MAX_SKB_FRAGS+1]; in pasemi_mac_start_tx()
/linux-4.1.27/drivers/net/ethernet/qlogic/
qla3xxx.h 1017 #define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
1041 struct map_list map[MAX_SKB_FRAGS+1];
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2.c 369 if (c2_port->tx_avail > MAX_SKB_FRAGS + 1) in c2_tx_clean()
414 && c2_port->tx_avail > MAX_SKB_FRAGS + 1) in c2_tx_interrupt()
826 if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) { in c2_xmit_frame()
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/
vxge-main.h 412 dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
vxge-main.c 3768 MAX_SKB_FRAGS + 1; in vxge_config_vpaths()
/linux-4.1.27/net/xfrm/
xfrm_ipcomp.c 76 if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) in ipcomp_decompress()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/
ixgbevf.h 50 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/drivers/net/ethernet/qlogic/qlge/
qlge.h 64 #if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
65 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
1356 struct map_list map[MAX_SKB_FRAGS + 2];
/linux-4.1.27/drivers/net/ethernet/tile/
tilepro.c 1861 lepp_frag_t frags[1 + MAX_SKB_FRAGS]; in tile_net_tx()
1876 lepp_cmd_t cmds[1 + MAX_SKB_FRAGS]; in tile_net_tx()
2265 if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS) in tile_net_setup()
tilegx.c 80 #define MAX_FRAGS (MAX_SKB_FRAGS + 1)
/linux-4.1.27/drivers/net/ethernet/intel/igb/
igb.h 177 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
xgbe.h 152 #define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/
fm10k.h 401 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/drivers/net/ethernet/freescale/fs_enet/
fs_enet-main.c 308 if (++fep->tx_free >= MAX_SKB_FRAGS) in fs_enet_tx_napi()
614 if (fep->tx_free < MAX_SKB_FRAGS) in fs_enet_start_xmit()
/linux-4.1.27/drivers/net/ethernet/calxeda/
xgmac.c 910 (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) in xgmac_tx_complete()
1150 if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { in xgmac_xmit()
1154 if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) in xgmac_xmit()
/linux-4.1.27/drivers/net/ethernet/3com/
typhoon.c 142 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
147 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
843 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1; in typhoon_start_tx()
1543 int numDesc = MAX_SKB_FRAGS + 1; in typhoon_tx_complete()
3c59x.c 557 #ifdef MAX_SKB_FRAGS
570 } frag[1+MAX_SKB_FRAGS];
/linux-4.1.27/drivers/net/ethernet/alteon/
acenic.h 701 #define TX_RESERVED MAX_SKB_FRAGS
/linux-4.1.27/drivers/net/ethernet/sfc/
tx.c 119 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; in efx_tx_max_skb_descs()
129 max_descs += max_t(unsigned int, MAX_SKB_FRAGS, in efx_tx_max_skb_descs()
/linux-4.1.27/drivers/net/ethernet/qlogic/netxen/
netxen_nic.h 178 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
561 struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
sge.c 129 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
860 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; in write_sgl()
1144 dma_addr_t addr[MAX_SKB_FRAGS + 1]; in t4_eth_xmit()
cxgb4.h 469 struct page_frag frags[MAX_SKB_FRAGS];
/linux-4.1.27/drivers/net/ethernet/sun/
sungem.c 703 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { in gem_tx()
708 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) in gem_tx()
1095 if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { in gem_start_xmit()
1104 if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) in gem_start_xmit()
sunhme.c 1972 TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1)) in happy_meal_tx()
2370 if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1)) in happy_meal_start_xmit()
cassini.c 1921 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) in cas_tx_ringN()
2854 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) in cas_xmit_tx_ringN()
niu.c 6739 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { in niu_start_xmit()
/linux-4.1.27/drivers/net/ethernet/tundra/
tsi108_eth.c 668 if (data->txfree < MAX_SKB_FRAGS + 1) { in tsi108_send_packet()
677 if (data->txfree - frags < MAX_SKB_FRAGS + 1) { in tsi108_send_packet()
/linux-4.1.27/include/linux/
skbuff.h 202 #define MAX_SKB_FRAGS 16UL macro
204 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) macro
338 skb_frag_t frags[MAX_SKB_FRAGS];
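The two skbuff.h hits above are the alternative arms of the MAX_SKB_FRAGS definition: the value is (65536/PAGE_SIZE + 1), floored at 16. As a worked example, assuming 4 KiB pages (an assumption, not stated in the listing): 65536/4096 + 1 = 17, i.e. enough page fragments to hold a full 64 KiB GSO payload even when the data is not page-aligned. A stand-alone snippet that mirrors the preprocessor logic:

    /* Stand-alone illustration of the skbuff.h logic above; PAGE_SIZE = 4096
     * is an assumption for the example, not something taken from the listing. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    #if (65536 / PAGE_SIZE + 1) < 16
    #define MAX_SKB_FRAGS 16UL
    #else
    #define MAX_SKB_FRAGS (65536 / PAGE_SIZE + 1)
    #endif

    int main(void)
    {
            printf("MAX_SKB_FRAGS = %lu\n", MAX_SKB_FRAGS);  /* prints 17 */
            return 0;
    }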
/linux-4.1.27/drivers/net/ethernet/ti/
netcp_core.c 1878 netcp->tx_pause_threshold = MAX_SKB_FRAGS; in netcp_create_interface()
1971 if (netcp->tx_pool_size < MAX_SKB_FRAGS) { in netcp_create_interface()
1973 MAX_SKB_FRAGS); in netcp_create_interface()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
sge.c 139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1144 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_tx_pkt_wr()
1584 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_ofld_wr()
3037 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); in t3_sge_alloc_qset()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
ixgbe.h 171 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/linux-4.1.27/net/ipv4/
ip_output.c 1052 if (i == MAX_SKB_FRAGS) in __ip_append_data()
1282 } else if (i < MAX_SKB_FRAGS) { in ip_append_page()
tcp.c 1026 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) in select_size()
tcp_input.c 4449 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); in tcp_send_rcvq()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.h 1248 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; in bnx2x_mtu_allows_gro()
bnx2x_cmn.c 217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { in bnx2x_free_tx_pkt()
601 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { in bnx2x_fill_frag_skb()
3377 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3796 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) in bnx2x_start_xmit()
bnx2x.h 388 #define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
767 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
bnx2x_ethtool.c 1862 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { in bnx2x_set_ringparam()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
sge.c 449 if (credits < MAX_SKB_FRAGS + 1) in sched_skb()
711 (MAX_SKB_FRAGS + 1); in alloc_tx_resources()
cxgb2.c 731 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) in set_sge_param()
/linux-4.1.27/drivers/net/ethernet/realtek/
8139cp.c 705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) in cp_tx()
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) in cp_start_xmit()
r8169.c 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { in rtl8169_start_xmit()
7122 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) in rtl8169_start_xmit()
7236 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { in rtl_tx()
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/
qlcnic.h 99 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
364 struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
/linux-4.1.27/drivers/net/ethernet/tehuti/
tehuti.c 1483 } txd_sizes[MAX_SKB_FRAGS + 1];
1548 for (i = 0; i < MAX_SKB_FRAGS + 1; i++) { in init_txd_sizes()
/linux-4.1.27/drivers/net/ethernet/cisco/enic/
enic_main.c 173 (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) in enic_wq_service()
602 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) in enic_hard_start_xmit()
/linux-4.1.27/drivers/net/ethernet/marvell/
mv643xx_eth.c 190 #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
930 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { in txq_submit_skb()
mvpp2.c 4459 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1) in mvpp2_txq_done()
5240 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) { in mvpp2_tx()
mvneta.c 264 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
skge.c 58 #define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
sky2.c 70 #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
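The sky2.c define above sizes the worst-case number of transmit list elements per packet; the sizeof(dma_addr_t)/sizeof(u32) factor allows for an extra element per buffer when 64-bit DMA addresses are in use. Worked out under the assumptions of 4 KiB pages (MAX_SKB_FRAGS = 17) and an 8-byte dma_addr_t:

    MAX_SKB_TX_LE = 2 + (8/4) * (17 + 1) = 2 + 36 = 38 list elements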
/linux-4.1.27/drivers/net/ethernet/broadcom/genet/
bcmgenet.c 1074 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { in __bcmgenet_tx_reclaim()
1358 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) in bcmgenet_xmit()
/linux-4.1.27/drivers/net/ethernet/aeroflex/
greth.c 736 (MAX_SKB_FRAGS+1))) in greth_clean_tx_gbit()
/linux-4.1.27/drivers/net/ethernet/freescale/
fec_main.c 211 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
469 if (entries_free < MAX_SKB_FRAGS + 1) { in fec_enet_txq_submit_skb()
/linux-4.1.27/drivers/net/vmxnet3/
vmxnet3_drv.c 647 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); in vmxnet3_append_frag()
2159 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); in vmxnet3_setup_driver_shared()
/linux-4.1.27/net/ipv6/
ip6_output.c 1508 if (i == MAX_SKB_FRAGS) in __ip6_append_data()
/linux-4.1.27/net/packet/
af_packet.c 2233 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { in tpacket_fill_skb()
2235 MAX_SKB_FRAGS); in tpacket_fill_skb()
/linux-4.1.27/drivers/net/ethernet/samsung/sxgbe/
sxgbe_main.c 1420 if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) { in sxgbe_xmit()
/linux-4.1.27/drivers/net/ethernet/intel/ixgb/
ixgb_main.c 1509 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
/linux-4.1.27/drivers/infiniband/hw/nes/
nes_nic.c 473 #define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS in nes_netdev_start_xmit()
/linux-4.1.27/net/unix/
af_unix.c 1624 MAX_SKB_FRAGS * PAGE_SIZE); in unix_dgram_sendmsg()
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/
netdev.c 2279 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); in igbvf_xmit_frame_ring_adv()
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
be_main.c 1856 BUG_ON(j > MAX_SKB_FRAGS); in skb_fill_rx_data()
1934 BUG_ON(j > MAX_SKB_FRAGS); in be_rx_compl_process_gro()
/linux-4.1.27/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c 2057 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { in stmmac_xmit()
/linux-4.1.27/drivers/net/ethernet/
jme.c 2225 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { in jme_stop_queue_if_full()
/linux-4.1.27/drivers/net/ethernet/agere/
et131x.c 2439 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); in nic_send_packet()
/linux-4.1.27/drivers/net/ethernet/neterion/
s2io.c 644 fifo->max_txds = MAX_SKB_FRAGS + 2; in init_shared_mem()
7857 config->max_txds = MAX_SKB_FRAGS + 2; in s2io_init_nic()
/linux-4.1.27/drivers/net/ethernet/broadcom/
bnx2.c 6696 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { in bnx2_start_xmit()
7364 (ering->tx_pending <= MAX_SKB_FRAGS)) { in bnx2_set_ringparam()
tg3.c 8120 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { in tg3_start_xmit()
12363 (ering->tx_pending <= MAX_SKB_FRAGS) || in tg3_set_ringparam()
12365 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) in tg3_set_ringparam()
/linux-4.1.27/drivers/net/ethernet/intel/e1000/
e1000_main.c 3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); in e1000_xmit_frame()
/linux-4.1.27/drivers/net/ethernet/intel/e1000e/
netdev.c 5670 (MAX_SKB_FRAGS * in e1000_xmit_frame()