
Searched refs:shinfo (Results 1 – 11 of 11) sorted by relevance

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
en_tx.c 558 const struct skb_shared_info *shinfo, in is_inline() argument
566 if (shinfo->nr_frags == 1) { in is_inline()
567 ptr = skb_frag_address_safe(&shinfo->frags[0]); in is_inline()
573 if (shinfo->nr_frags) in is_inline()
590 const struct skb_shared_info *shinfo, in get_real_size() argument
599 if (shinfo->gso_size) { in get_real_size()
605 real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + in get_real_size()
621 shinfo, pfrag); in get_real_size()
627 (shinfo->nr_frags + 1) * DS_SIZE; in get_real_size()
635 const struct skb_shared_info *shinfo, in build_inline_wqe() argument
[all …]
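The mlx4 hits above show the TX path sizing a send descriptor from shinfo: is_inline() peeks at nr_frags and frags[0], while get_real_size() branches on gso_size. A minimal sketch of that sizing decision follows; CTRL_SIZE and DS_SIZE are assumed placeholder constants, not the real mlx4 values, and the real GSO branch also reserves room for the inlined headers.

#include <linux/skbuff.h>

/* assumed placeholder sizes; the real values live in mlx4_en.h */
#define CTRL_SIZE 16
#define DS_SIZE   16

static int tx_desc_bytes(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->gso_size)
		/* GSO skb: the payload is carried in the frags */
		return CTRL_SIZE + shinfo->nr_frags * DS_SIZE;

	/* non-GSO skb: one extra data segment for the linear area */
	return CTRL_SIZE + (shinfo->nr_frags + 1) * DS_SIZE;
}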
/linux-4.1.27/drivers/net/xen-netback/
netback.c 820 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_get_requests() local
821 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
830 if (shinfo->nr_frags > MAX_SKB_FRAGS) { in xenvif_get_requests()
831 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS; in xenvif_get_requests()
833 shinfo->nr_frags = MAX_SKB_FRAGS; in xenvif_get_requests()
835 nr_slots = shinfo->nr_frags; in xenvif_get_requests()
838 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); in xenvif_get_requests()
840 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
841 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
845 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
[all …]
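xenvif_get_requests() walks and, if necessary, truncates the frag array of the skb being built for the guest's TX request. A minimal sketch of just the capping step; the pending-index bookkeeping via frag_set_pending_idx() is xen-netback specific and omitted, and cap_frags is an illustrative name.

#include <linux/skbuff.h>

static unsigned int cap_frags(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int frag_overflow = 0;

	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		/* remember how many frags must go into a follow-up skb */
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	return frag_overflow;
}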
/linux-4.1.27/net/ipv4/
tcp_output.c 396 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_init_nondata_skb() local
405 shinfo->gso_size = 0; in tcp_init_nondata_skb()
406 shinfo->gso_type = 0; in tcp_init_nondata_skb()
1062 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_set_skb_tso_segs() local
1072 shinfo->gso_size = 0; in tcp_set_skb_tso_segs()
1073 shinfo->gso_type = 0; in tcp_set_skb_tso_segs()
1076 shinfo->gso_size = mss_now; in tcp_set_skb_tso_segs()
1077 shinfo->gso_type = sk->sk_gso_type; in tcp_set_skb_tso_segs()
1128 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_fragment_tstamp() local
1130 if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) && in tcp_fragment_tstamp()
[all …]
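Both tcp_init_nondata_skb() and tcp_set_skb_tso_segs() show TCP deciding whether an skb needs GSO. A minimal sketch of the shinfo side of that decision; the real function also fills gso_segs and applies extra checks such as sk_can_gso(), and mark_tso is an illustrative name.

#include <net/sock.h>

static void mark_tso(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb->len <= mss_now) {
		/* fits in one segment: no offload needed */
		shinfo->gso_size = 0;
		shinfo->gso_type = 0;
	} else {
		/* segment at the current MSS using the socket's GSO type */
		shinfo->gso_size = mss_now;
		shinfo->gso_type = sk->sk_gso_type;
	}
}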
tcp.c 434 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_tx_timestamp() local
436 sock_tx_timestamp(sk, &shinfo->tx_flags); in tcp_tx_timestamp()
437 if (shinfo->tx_flags & SKBTX_ANY_TSTAMP) in tcp_tx_timestamp()
438 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; in tcp_tx_timestamp()
tcp_input.c 3041 const struct skb_shared_info *shinfo; in tcp_ack_tstamp() local
3047 shinfo = skb_shinfo(skb); in tcp_ack_tstamp()
3048 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && in tcp_ack_tstamp()
3049 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) in tcp_ack_tstamp()
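tcp_tx_timestamp() and tcp_ack_tstamp() are the two ends of the TX-timestamp pattern: at send time the socket's flags are copied into shinfo->tx_flags and tskey records the sequence number of the last byte, and at ACK time the stack checks whether tskey falls in the newly acked range. A minimal sketch of both sides; the function names are illustrative, and between() is the sequence-number helper from net/tcp.h.

#include <net/tcp.h>

static void stamp_on_send(struct sock *sk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	sock_tx_timestamp(sk, &shinfo->tx_flags);
	if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
		shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
}

static bool stamp_acked(const struct sock *sk, const struct sk_buff *skb,
			u32 prior_snd_una)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	return (shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
	       between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1);
}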
/linux-4.1.27/net/core/
skbuff.c 206 struct skb_shared_info *shinfo; in __alloc_skb() local
258 shinfo = skb_shinfo(skb); in __alloc_skb()
259 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); in __alloc_skb()
260 atomic_set(&shinfo->dataref, 1); in __alloc_skb()
261 kmemcheck_annotate_variable(shinfo->destructor_arg); in __alloc_skb()
305 struct skb_shared_info *shinfo; in __build_skb() local
326 shinfo = skb_shinfo(skb); in __build_skb()
327 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); in __build_skb()
328 atomic_set(&shinfo->dataref, 1); in __build_skb()
329 kmemcheck_annotate_variable(shinfo->destructor_arg); in __build_skb()
[all …]
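Both __alloc_skb() and __build_skb() initialise the shared info area the same way: clear everything laid out before the dataref field in one memset, then set dataref to 1 for the fresh, unshared data buffer. A minimal sketch of that idiom; init_shinfo is an illustrative name.

#include <linux/skbuff.h>
#include <linux/string.h>

static void init_shinfo(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* all fields that must start zeroed sit before dataref */
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
}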
dev.c 2779 const struct skb_shared_info *shinfo = skb_shinfo(skb); in qdisc_pkt_len_init() local
2786 if (shinfo->gso_size) { in qdisc_pkt_len_init()
2788 u16 gso_segs = shinfo->gso_segs; in qdisc_pkt_len_init()
2794 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) in qdisc_pkt_len_init()
2799 if (shinfo->gso_type & SKB_GSO_DODGY) in qdisc_pkt_len_init()
2801 shinfo->gso_size); in qdisc_pkt_len_init()
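qdisc_pkt_len_init() uses the GSO fields to estimate how many bytes a GSO skb will actually put on the wire: the payload plus one copy of the headers per segment, recomputing gso_segs from gso_size when the count is untrusted (SKB_GSO_DODGY). A minimal sketch, assuming hdr_len has already been derived from the transport headers as the real function does.

#include <linux/skbuff.h>

static unsigned int wire_len_estimate(const struct sk_buff *skb,
				      unsigned int hdr_len)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int len = skb->len;
	u16 gso_segs = shinfo->gso_segs;

	if (shinfo->gso_size) {
		/* an untrusted (e.g. virtio) segment count is recomputed */
		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);
		len += (gso_segs - 1) * hdr_len;
	}
	return len;
}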
/linux-4.1.27/drivers/net/
xen-netfront.c 825 struct skb_shared_info *shinfo = skb_shinfo(skb); in xennet_fill_frags() local
834 if (shinfo->nr_frags == MAX_SKB_FRAGS) { in xennet_fill_frags()
840 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); in xennet_fill_frags()
842 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), in xennet_fill_frags()
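xennet_fill_frags() attaches each page received from the backend to the skb with skb_add_rx_frag(), refusing to go past MAX_SKB_FRAGS. A minimal sketch of that append; append_rx_page is an illustrative name, and the real code flushes or drops when the array is full.

#include <linux/skbuff.h>

static bool append_rx_page(struct sk_buff *skb, struct page *page,
			   unsigned int off, unsigned int len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->nr_frags == MAX_SKB_FRAGS)
		return false;	/* caller must flush or drop */

	/* bumps nr_frags and updates skb->len, data_len and truesize */
	skb_add_rx_frag(skb, shinfo->nr_frags, page, off, len, PAGE_SIZE);
	return true;
}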
/linux-4.1.27/include/linux/
skbuff.h 3387 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_warn_if_lro() local
3389 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && in skb_warn_if_lro()
3390 unlikely(shinfo->gso_type == 0)) { in skb_warn_if_lro()
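skb_warn_if_lro() encodes the rule that a nonlinear skb with a gso_size but no gso_type must have been aggregated by LRO and cannot be re-segmented, so forwarding it is a bug. A minimal sketch of just the predicate; the real helper also prints a warning.

#include <linux/skbuff.h>

static bool looks_like_lro(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	return skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	       unlikely(shinfo->gso_type == 0);
}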
/linux-4.1.27/drivers/net/ethernet/broadcom/
bnx2.c 2940 struct skb_shared_info *shinfo; in bnx2_reuse_rx_skb_pages() local
2942 shinfo = skb_shinfo(skb); in bnx2_reuse_rx_skb_pages()
2943 shinfo->nr_frags--; in bnx2_reuse_rx_skb_pages()
2944 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]); in bnx2_reuse_rx_skb_pages()
2945 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL); in bnx2_reuse_rx_skb_pages()
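bnx2_reuse_rx_skb_pages() pops the last frag back off an skb so the page can be recycled into the RX ring. A minimal sketch of that pop; pop_last_frag is an illustrative name, and adjusting skb->len, data_len and truesize is left to the caller, as in bnx2.

#include <linux/skbuff.h>

static struct page *pop_last_frag(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct page *page;

	shinfo->nr_frags--;
	page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
	__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
	return page;
}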
/linux-4.1.27/drivers/net/ethernet/intel/e1000e/
netdev.c 1526 struct skb_shared_info *shinfo; in e1000_clean_jumbo_rx_irq() local
1582 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1583 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1594 shinfo = skb_shinfo(rxtop); in e1000_clean_jumbo_rx_irq()
1595 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
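e1000_clean_jumbo_rx_irq() builds one large skb (rxtop) out of several RX buffers, attaching each page at the next free frag index with skb_fill_page_desc(). A minimal sketch of that attach; attach_rx_page is an illustrative name, and unlike skb_add_rx_frag() this call does not touch skb->len, data_len or truesize, so the driver accounts for those separately.

#include <linux/skbuff.h>

static void attach_rx_page(struct sk_buff *rxtop, struct page *page,
			   unsigned int length)
{
	struct skb_shared_info *shinfo = skb_shinfo(rxtop);

	/* fills frags[nr_frags] and advances nr_frags */
	skb_fill_page_desc(rxtop, shinfo->nr_frags, page, 0, length);
}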