/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
D | en_tx.c
    560  const struct skb_shared_info *shinfo,  in is_inline() argument
    568  if (shinfo->nr_frags == 1) {  in is_inline()
    569  ptr = skb_frag_address_safe(&shinfo->frags[0]);  in is_inline()
    575  if (shinfo->nr_frags)  in is_inline()
    592  const struct skb_shared_info *shinfo,  in get_real_size() argument
    601  if (shinfo->gso_size) {  in get_real_size()
    607  real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +  in get_real_size()
    623  shinfo, pfrag);  in get_real_size()
    629  (shinfo->nr_frags + 1) * DS_SIZE;  in get_real_size()
    637  const struct skb_shared_info *shinfo,  in build_inline_wqe() argument
    [all …]
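The en_tx.c hits come from the transmit path sizing its work-queue entry off the shared info: with TSO every page fragment gets a data segment and the headers are inlined, while a small linear skb can be copied into the WQE entirely. A rough sketch of that sizing rule, with hypothetical CTRL_SIZE, DS_SIZE and MAX_INLINE constants standing in for the driver's own values:

    #include <linux/skbuff.h>
    #include <net/tcp.h>

    #define CTRL_SIZE   16   /* hypothetical control-segment size */
    #define DS_SIZE     16   /* hypothetical data-segment size */
    #define MAX_INLINE  104  /* hypothetical inline-copy threshold */

    /* How many descriptor bytes will this TX skb need? (sketch) */
    static int wqe_real_size(const struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (shinfo->gso_size)
            /* TSO: one data segment per frag, headers inlined. */
            return CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
                   ALIGN(skb_transport_offset(skb) + tcp_hdrlen(skb), 16);

        if (!shinfo->nr_frags && skb->len <= MAX_INLINE)
            /* Small linear skb: copy the payload into the WQE itself. */
            return CTRL_SIZE + ALIGN(skb->len, 16);

        /* Linear head plus one data segment per page fragment. */
        return CTRL_SIZE + (shinfo->nr_frags + 1) * DS_SIZE;
    }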
/linux-4.4.14/drivers/net/xen-netback/
D | netback.c
    868  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_get_requests() local
    869  skb_frag_t *frags = shinfo->frags;  in xenvif_get_requests()
    875  nr_slots = shinfo->nr_frags;  in xenvif_get_requests()
    878  start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);  in xenvif_get_requests()
    880  for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;  in xenvif_get_requests()
    881  shinfo->nr_frags++, txp++, gop++) {  in xenvif_get_requests()
    885  frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);  in xenvif_get_requests()
    890  shinfo = skb_shinfo(nskb);  in xenvif_get_requests()
    891  frags = shinfo->frags;  in xenvif_get_requests()
    893  for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;  in xenvif_get_requests()
    [all …]
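xenvif_get_requests() walks the guest's TX ring slots and turns each one into a page fragment, growing shinfo->nr_frags as it goes (and spilling into a second skb, nskb, when the frag array overflows). A stripped-down sketch of that fill loop, where slot_page and slot_len are hypothetical stand-ins for netback's pending-index and grant-copy bookkeeping:

    #include <linux/skbuff.h>

    /* Attach up to nr_slots pages to skb as fragments (sketch). */
    static void fill_frags_from_slots(struct sk_buff *skb,
                                      struct page **slot_page,
                                      unsigned int *slot_len,
                                      unsigned int nr_slots)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int i;

        for (i = 0; i < nr_slots && shinfo->nr_frags < MAX_SKB_FRAGS; i++) {
            /* skb_fill_page_desc() stores the frag and bumps nr_frags... */
            skb_fill_page_desc(skb, shinfo->nr_frags,
                               slot_page[i], 0, slot_len[i]);
            /* ...but length accounting is left to the caller. */
            skb->len      += slot_len[i];
            skb->data_len += slot_len[i];
        }
    }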
/linux-4.4.14/net/core/
D | skbuff.c
    206  struct skb_shared_info *shinfo;  in __alloc_skb() local
    258  shinfo = skb_shinfo(skb);  in __alloc_skb()
    259  memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));  in __alloc_skb()
    260  atomic_set(&shinfo->dataref, 1);  in __alloc_skb()
    261  kmemcheck_annotate_variable(shinfo->destructor_arg);  in __alloc_skb()
    305  struct skb_shared_info *shinfo;  in __build_skb() local
    326  shinfo = skb_shinfo(skb);  in __build_skb()
    327  memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));  in __build_skb()
    328  atomic_set(&shinfo->dataref, 1);  in __build_skb()
    329  kmemcheck_annotate_variable(shinfo->destructor_arg);  in __build_skb()
    [all …]
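Both allocation paths listed here finish the same way: the shared info lives at skb->end, everything up to the dataref field is zeroed in one memset, and dataref is set to 1 for the freshly allocated data buffer. A minimal restatement of that initialization step, assuming skb->end has already been set up by the allocator:

    #include <linux/skbuff.h>

    static void init_shared_info(struct sk_buff *skb)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* Clear nr_frags, tx_flags, the gso_* fields, frag_list, etc. --
         * every member laid out before dataref starts out zero. */
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));

        /* One reference on the data buffer: this skb. Clones bump it. */
        atomic_set(&shinfo->dataref, 1);
    }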
D | dev.c
    2842  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in qdisc_pkt_len_init() local
    2849  if (shinfo->gso_size) {  in qdisc_pkt_len_init()
    2851  u16 gso_segs = shinfo->gso_segs;  in qdisc_pkt_len_init()
    2857  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))  in qdisc_pkt_len_init()
    2862  if (shinfo->gso_type & SKB_GSO_DODGY)  in qdisc_pkt_len_init()
    2864  shinfo->gso_size);  in qdisc_pkt_len_init()
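qdisc_pkt_len_init() uses the gso_* fields to charge a GSO skb for the headers every segment will carry on the wire, not just for skb->len, and it distrusts gso_segs on SKB_GSO_DODGY packets. A simplified restatement for the TCP case (the real function also accounts for a UDP header):

    #include <linux/skbuff.h>
    #include <net/tcp.h>

    /* Estimate on-wire bytes for a possibly-GSO skb (sketch, TCP only). */
    static unsigned int wire_pkt_len(const struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int hdr_len, len = skb->len;
        u16 gso_segs;

        if (!shinfo->gso_size)
            return len;

        /* MAC + network headers, repeated in front of every segment. */
        hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
        if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
            hdr_len += tcp_hdrlen(skb);

        gso_segs = shinfo->gso_segs;
        /* Untrusted sources may carry a bogus segment count. */
        if (shinfo->gso_type & SKB_GSO_DODGY)
            gso_segs = DIV_ROUND_UP(skb->len - hdr_len, shinfo->gso_size);

        return len + (gso_segs - 1) * hdr_len;
    }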
/linux-4.4.14/net/ipv4/
D | tcp_output.c
    1117  struct skb_shared_info *shinfo = skb_shinfo(skb);  in tcp_fragment_tstamp() local
    1119  if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&  in tcp_fragment_tstamp()
    1120  !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {  in tcp_fragment_tstamp()
    1122  u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;  in tcp_fragment_tstamp()
    1124  shinfo->tx_flags &= ~tsflags;  in tcp_fragment_tstamp()
    1126  swap(shinfo->tskey, shinfo2->tskey);  in tcp_fragment_tstamp()
    1225  struct skb_shared_info *shinfo;  in __pskb_trim_head() local
    1237  shinfo = skb_shinfo(skb);  in __pskb_trim_head()
    1238  for (i = 0; i < shinfo->nr_frags; i++) {  in __pskb_trim_head()
    1239  int size = skb_frag_size(&shinfo->frags[i]);  in __pskb_trim_head()
    [all …]
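The tcp_fragment_tstamp() hits show what happens to a timestamp request when TCP splits an skb: if the byte identified by tskey now lives in the second half (skb2), the SKBTX_ANY_TSTAMP flags and the key move over with it. Roughly:

    #include <net/tcp.h>

    /* After splitting skb into skb/skb2, keep the timestamp request with
     * the fragment that still contains the marked byte (sketch). */
    static void move_tstamp_to_tail(struct sk_buff *skb, struct sk_buff *skb2)
    {
        struct skb_shared_info *shinfo  = skb_shinfo(skb);
        struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
        u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;

        if (!tsflags || before(shinfo->tskey, TCP_SKB_CB(skb2)->seq))
            return;    /* marked byte stays in the first fragment */

        shinfo->tx_flags  &= ~tsflags;
        shinfo2->tx_flags |=  tsflags;
        swap(shinfo->tskey, shinfo2->tskey);
    }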
D | tcp.c
    435  struct skb_shared_info *shinfo = skb_shinfo(skb);  in tcp_tx_timestamp() local
    437  sock_tx_timestamp(sk, &shinfo->tx_flags);  in tcp_tx_timestamp()
    438  if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)  in tcp_tx_timestamp()
    439  shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;  in tcp_tx_timestamp()
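tcp_tx_timestamp() is the send-side half of SO_TIMESTAMPING on TCP: the socket's timestamp options are translated into shinfo->tx_flags, and tskey records the sequence number of the last byte of this write so the completion can be matched back to it later. Essentially:

    #include <net/sock.h>
    #include <net/tcp.h>

    static void mark_for_tx_tstamp(struct sock *sk, struct sk_buff *skb)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* Copy the socket's SO_TIMESTAMPING request into skb flags. */
        sock_tx_timestamp(sk, &shinfo->tx_flags);
        if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
            /* Key the request to the sequence number of the last byte. */
            shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
    }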
D | tcp_input.c
    3081  const struct skb_shared_info *shinfo;  in tcp_ack_tstamp() local
    3087  shinfo = skb_shinfo(skb);  in tcp_ack_tstamp()
    3088  if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&  in tcp_ack_tstamp()
    3089  between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))  in tcp_ack_tstamp()
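tcp_ack_tstamp() is the matching receive-side check: when an ACK is about to free an skb, the ACK timestamp is reported only if it was requested and the keyed byte lies inside the newly acknowledged range. The test itself:

    #include <net/tcp.h>

    static bool acked_skb_wants_tstamp(const struct sock *sk,
                                       const struct sk_buff *skb,
                                       u32 prior_snd_una)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* tskey must lie between prior_snd_una and snd_una - 1, i.e.
         * within the bytes this ACK newly acknowledged. */
        return (shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
               between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1);
    }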
/linux-4.4.14/drivers/net/
D | xen-netfront.c
    876  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xennet_fill_frags() local
    885  if (shinfo->nr_frags == MAX_SKB_FRAGS) {  in xennet_fill_frags()
    891  BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);  in xennet_fill_frags()
    893  skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),  in xennet_fill_frags()
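xennet_fill_frags() shows the RX-side idiom: received pages are appended with skb_add_rx_frag(), which updates len, data_len and truesize, and the loop must stop once nr_frags reaches MAX_SKB_FRAGS. A sketch of one append; the PAGE_SIZE truesize is an assumption:

    #include <linux/skbuff.h>

    static int append_rx_page(struct sk_buff *skb, struct page *page,
                              unsigned int offset, unsigned int len)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (shinfo->nr_frags == MAX_SKB_FRAGS)
            return -ENOSPC;    /* caller must flush or drop */

        /* Adds the frag and accounts len/data_len/truesize in one call. */
        skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, PAGE_SIZE);
        return 0;
    }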
/linux-4.4.14/include/linux/
D | skbuff.h
    3553  const struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_warn_if_lro() local
    3555  if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&  in skb_warn_if_lro()
    3556  unlikely(shinfo->gso_type == 0)) {  in skb_warn_if_lro()
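skb_warn_if_lro() encodes the rule that a non-linear skb carrying a gso_size but no gso_type was merged by LRO and must not be forwarded or resegmented. The predicate behind the warning:

    #include <linux/skbuff.h>

    static bool skb_looks_lro(const struct sk_buff *skb)
    {
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        /* GSO packets always advertise a gso_type; LRO aggregation sets
         * only gso_size, which is what this test catches. */
        return skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
               shinfo->gso_type == 0;
    }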
/linux-4.4.14/drivers/net/ethernet/broadcom/
D | bnx2.c
    2958  struct skb_shared_info *shinfo;  in bnx2_reuse_rx_skb_pages() local
    2960  shinfo = skb_shinfo(skb);  in bnx2_reuse_rx_skb_pages()
    2961  shinfo->nr_frags--;  in bnx2_reuse_rx_skb_pages()
    2962  page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);  in bnx2_reuse_rx_skb_pages()
    2963  __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);  in bnx2_reuse_rx_skb_pages()
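bnx2_reuse_rx_skb_pages() (and bnxt_rx_pages() below) pop page fragments back off an skb so the pages can be returned to the RX ring: decrement nr_frags, read the page pointer, then clear it in the frag. The core of that pattern, with length/truesize accounting left to the caller as in the drivers:

    #include <linux/skbuff.h>

    /* Detach and return the last page fragment of skb, or NULL (sketch). */
    static struct page *pop_last_frag(struct sk_buff *skb)
    {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        struct page *page;

        if (!shinfo->nr_frags)
            return NULL;

        shinfo->nr_frags--;
        page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
        /* Clear the stale page pointer, as the drivers above do. */
        __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
        return page;
    }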
/linux-4.4.14/drivers/net/ethernet/intel/e1000e/
D | netdev.c
    1526  struct skb_shared_info *shinfo;  in e1000_clean_jumbo_rx_irq() local
    1582  shinfo = skb_shinfo(rxtop);  in e1000_clean_jumbo_rx_irq()
    1583  skb_fill_page_desc(rxtop, shinfo->nr_frags,  in e1000_clean_jumbo_rx_irq()
    1594  shinfo = skb_shinfo(rxtop);  in e1000_clean_jumbo_rx_irq()
    1595  skb_fill_page_desc(rxtop, shinfo->nr_frags,  in e1000_clean_jumbo_rx_irq()
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
D | bnxt.c
    708  struct skb_shared_info *shinfo;  in bnxt_rx_pages() local
    711  shinfo = skb_shinfo(skb);  in bnxt_rx_pages()
    712  nr_frags = --shinfo->nr_frags;  in bnxt_rx_pages()
    713  __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);  in bnxt_rx_pages()