Lines matching refs:mss_now (net/ipv4/tcp_output.c)
68 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1056 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) in tcp_set_skb_tso_segs() argument
1058 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { in tcp_set_skb_tso_segs()
1065 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
1066 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
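
The hits at 1056-1066 show the TSO sizing rule: a packet no longer than one MSS (or one the hardware cannot checksum) counts as a single segment; otherwise the pcount is the payload length divided by mss_now, rounded up, and mss_now is recorded as the gso_size. A minimal userspace sketch of that arithmetic (DIV_ROUND_UP is the real kernel macro; tso_pcount is a hypothetical stand-in):

    #include <stdio.h>

    /* The kernel's DIV_ROUND_UP: ceiling division for positive integers. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Hypothetical model of tcp_set_skb_tso_segs(): how many MSS-sized
     * segments a payload of 'len' bytes becomes. */
    static unsigned int tso_pcount(unsigned int len, unsigned int mss_now)
    {
        if (len <= mss_now)
            return 1;                           /* one segment, as at 1058 */
        return DIV_ROUND_UP(len, mss_now);      /* as at 1065 */
    }

    int main(void)
    {
        printf("%u\n", tso_pcount(4000, 1448)); /* 3 segments */
        return 0;
    }
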
1136 unsigned int mss_now, gfp_t gfp) in tcp_fragment() argument
1198 tcp_set_skb_tso_segs(skb, mss_now); in tcp_fragment()
1199 tcp_set_skb_tso_segs(buff, mss_now); in tcp_fragment()
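
tcp_fragment() splits an skb at len bytes, after which neither half's cached segment count is valid, so both are re-derived against the same mss_now (lines 1198-1199; the same pattern recurs at 1740-1741 in tso_fragment()). A sketch of that accounting, with fragment_counts as an illustrative name:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Splitting a 'total'-byte skb at 'len' leaves two buffers whose
     * TSO segment counts must be recomputed independently against the
     * same mss_now, mirroring the two calls at 1198-1199. */
    static void fragment_counts(unsigned int total, unsigned int len,
                                unsigned int mss_now)
    {
        printf("head pcount=%u tail pcount=%u\n",
               DIV_ROUND_UP(len, mss_now),
               DIV_ROUND_UP(total - len, mss_now));
    }

    int main(void)
    {
        fragment_counts(10000, 4344, 1448);     /* 3 + 4 segments */
        return 0;
    }
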
1289 int mss_now; in __tcp_mtu_to_mss() local
1294 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1301 mss_now -= icsk->icsk_af_ops->net_frag_header_len; in __tcp_mtu_to_mss()
1305 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1306 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1309 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1312 if (mss_now < 48) in __tcp_mtu_to_mss()
1313 mss_now = 48; in __tcp_mtu_to_mss()
1314 return mss_now; in __tcp_mtu_to_mss()
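
This is the core MSS derivation: start from the path MTU minus the network and TCP headers (1294), optionally subtract an IPv6 fragment header (1301), clamp to the MSS the peer advertised (1305-1306), subtract extension headers such as IPsec (1309), and never go below 48 bytes (1312-1313). A simplified userspace model, assuming plain IPv4 with no fragment header (mtu_to_mss and the constants are illustrative):

    #include <stdio.h>

    /* Illustrative constants standing in for the icsk fields used by
     * __tcp_mtu_to_mss(): IPv4 header plus sizeof(struct tcphdr). */
    #define NET_HEADER_LEN 20
    #define TCP_HEADER_LEN 20

    static int mtu_to_mss(int pmtu, int mss_clamp, int ext_hdr_len)
    {
        int mss_now = pmtu - NET_HEADER_LEN - TCP_HEADER_LEN; /* 1294 */

        if (mss_now > mss_clamp)        /* peer's advertised limit, 1305 */
            mss_now = mss_clamp;

        mss_now -= ext_hdr_len;         /* e.g. IPsec headers, 1309 */

        if (mss_now < 48)               /* hard floor, 1312 */
            mss_now = 48;
        return mss_now;
    }

    int main(void)
    {
        /* Ethernet MTU, typical clamp, no extension headers -> 1460. */
        printf("%d\n", mtu_to_mss(1500, 1460, 0));
        return 0;
    }
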
1390 int mss_now; in tcp_sync_mss() local
1395 mss_now = tcp_mtu_to_mss(sk, pmtu); in tcp_sync_mss()
1396 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1401 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1402 tp->mss_cache = mss_now; in tcp_sync_mss()
1404 return mss_now; in tcp_sync_mss()
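
tcp_sync_mss() turns a new path MTU into the MSS actually used: bound to half the largest window the peer has ever offered, so two segments always fit in flight (1396), capped at the MSS implied by the MTU-probing floor search_low while probing (1401), and cached in tp->mss_cache (1402). A simplified sketch; sync_mss, the crude mtu_to_mss, and the probing flag are illustrative:

    #include <stdio.h>

    /* Crude stand-in for tcp_mtu_to_mss(): IPv4 + TCP headers only. */
    static int mtu_to_mss(int pmtu) { return pmtu - 40; }

    static int sync_mss(int pmtu, int max_window, int probing,
                        int search_low, int *mss_cache)
    {
        int mss_now = mtu_to_mss(pmtu);                 /* 1395 */

        if (max_window && mss_now > max_window / 2)     /* 1396 */
            mss_now = max_window / 2;

        if (probing && mtu_to_mss(search_low) < mss_now)
            mss_now = mtu_to_mss(search_low);           /* 1401 */

        *mss_cache = mss_now;                           /* 1402 */
        return mss_now;
    }

    int main(void)
    {
        int cache;
        printf("%d\n", sync_mss(1500, 65535, 0, 0, &cache)); /* 1460 */
        return 0;
    }
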
1415 u32 mss_now; in tcp_current_mss() local
1420 mss_now = tp->mss_cache; in tcp_current_mss()
1425 mss_now = tcp_sync_mss(sk, mtu); in tcp_current_mss()
1436 mss_now -= delta; in tcp_current_mss()
1439 return mss_now; in tcp_current_mss()
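
tcp_current_mss() normally returns the cached value (1420), resyncing only when the destination's MTU changed (1425); it then subtracts the delta between the header size with this segment's actual options and the fixed header length the cache assumed (1436). A sketch of that adjustment (names are illustrative):

    #include <stdio.h>

    #define TCPHDR_SIZE 20  /* sizeof(struct tcphdr) */

    /* The cached MSS assumed 'cached_hdr_len' of TCP header; if this
     * segment's options make the real header bigger, the difference
     * comes out of the payload budget (the 'delta' at 1436). */
    static unsigned int current_mss(unsigned int mss_cache,
                                    unsigned int opts_len,
                                    unsigned int cached_hdr_len)
    {
        unsigned int mss_now = mss_cache;
        int delta = (int)(TCPHDR_SIZE + opts_len) - (int)cached_hdr_len;

        if (delta)
            mss_now -= delta;
        return mss_now;
    }

    int main(void)
    {
        /* Timestamps already in the cached header (32 bytes total);
         * one SACK block adds 12 more, so the payload shrinks by 12. */
        printf("%u\n", current_mss(1448, 24, 32)); /* 1436 */
        return 0;
    }
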
1508 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1511 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
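
The Minshall test at 1511: the skb ends in a sub-MSS segment exactly when its length is less than pcount * mss_now, and only such "runt" tails update the small-packet marker that Nagle later checks. Sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* The check at 1511: an skb shorter than pcount * mss_now must end
     * in a sub-MSS segment. */
    static bool ends_in_partial_segment(unsigned int len,
                                        unsigned int pcount,
                                        unsigned int mss_now)
    {
        return len < pcount * mss_now;
    }

    int main(void)
    {
        printf("%d\n", ends_in_partial_segment(2000, 2, 1448)); /* 1 */
        printf("%d\n", ends_in_partial_segment(2896, 2, 1448)); /* 0 */
        return 0;
    }
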
1533 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now) in tcp_tso_autosize() argument
1545 segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs); in tcp_tso_autosize()
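
Line 1545 sizes a TSO burst: a byte budget derived from the pacing rate (computed above the excerpt) is converted into segments, floored at sysctl_tcp_min_tso_segs so slow flows still aggregate a little. A sketch with max_t redefined for userspace:

    #include <stdio.h>

    /* Userspace rendition of the kernel's max_t(). */
    #define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    /* Final step of tcp_tso_autosize() (1545): a pacing-derived byte
     * budget becomes a segment count with a sysctl-imposed floor. */
    static unsigned int tso_autosize(unsigned int bytes, unsigned int mss_now,
                                     unsigned int min_tso_segs)
    {
        return max_t(unsigned int, bytes / mss_now, min_tso_segs);
    }

    int main(void)
    {
        /* ~125000 bytes per ms at 1 Gbit/s, mss 1448, floor of 2. */
        printf("%u\n", tso_autosize(125000, 1448, 2)); /* 86 */
        return 0;
    }
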
1553 unsigned int mss_now, in tcp_mss_split_point() argument
1561 max_len = mss_now * max_segs; in tcp_mss_split_point()
1571 partial = needed % mss_now; in tcp_mss_split_point()
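
tcp_mss_split_point() caps a transmission chunk at max_segs full segments (1561) and, when stopping short of that, computes the sub-MSS remainder (1571) so the partial tail can be held back instead of sent as a runt. A sketch; the hold-back-the-runt policy shown is simplified, since the kernel only trims under Nagle-like conditions:

    #include <stdio.h>

    static unsigned int split_point(unsigned int needed, unsigned int mss_now,
                                    unsigned int max_segs)
    {
        unsigned int max_len = mss_now * max_segs;  /* 1561 */
        unsigned int partial;

        if (needed >= max_len)
            return max_len;

        partial = needed % mss_now;                 /* 1571 */
        if (partial)
            return needed - partial;  /* hold back the runt (simplified) */
        return needed;
    }

    int main(void)
    {
        printf("%u\n", split_point(10000, 1448, 4)); /* capped: 5792 */
        printf("%u\n", split_point(3000, 1448, 4));  /* trimmed: 2896 */
        return 0;
    }
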
1611 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) in tcp_init_tso_segs() argument
1615 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { in tcp_init_tso_segs()
1616 tcp_set_skb_tso_segs(skb, mss_now); in tcp_init_tso_segs()
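
tcp_init_tso_segs() is the lazy counterpart of tcp_set_skb_tso_segs(): it recomputes only when the skb has never been sized (tso_segs == 0) or when a multi-segment skb was sized against a stale MSS (1615). A sketch with a hypothetical fake_skb standing in for the sk_buff fields:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct fake_skb {            /* hypothetical stand-in for sk_buff */
        unsigned int len;
        unsigned int tso_segs;   /* cached pcount, 0 if never computed */
        unsigned int gso_size;   /* the MSS it was computed against */
    };

    static unsigned int init_tso_segs(struct fake_skb *skb,
                                      unsigned int mss_now)
    {
        /* Recompute only if unset or sized against a stale MSS (1615). */
        if (!skb->tso_segs ||
            (skb->tso_segs > 1 && skb->gso_size != mss_now)) {
            skb->tso_segs = skb->len <= mss_now
                          ? 1 : DIV_ROUND_UP(skb->len, mss_now);
            skb->gso_size = mss_now;
        }
        return skb->tso_segs;
    }

    int main(void)
    {
        struct fake_skb skb = { .len = 4000 };
        printf("%u\n", init_tso_segs(&skb, 1448)); /* computed: 3 */
        printf("%u\n", init_tso_segs(&skb, 1448)); /* cached:   3 */
        return 0;
    }
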
1703 unsigned int mss_now, gfp_t gfp) in tso_fragment() argument
1711 return tcp_fragment(sk, skb, len, mss_now, gfp); in tso_fragment()
1740 tcp_set_skb_tso_segs(skb, mss_now); in tso_fragment()
1741 tcp_set_skb_tso_segs(buff, mss_now); in tso_fragment()
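
tso_fragment() is the fast split for skbs on the write queue; line 1711 falls back to the general tcp_fragment() when the cheap split does not apply, and both paths end with the same per-half re-accounting (1740-1741). A sketch of the dispatch, assuming the fallback condition is "the skb has linear, non-paged data" (field names mirror sk_buff loosely and are illustrative):

    #include <stdio.h>

    struct fake_skb {
        unsigned int len;       /* total payload */
        unsigned int data_len;  /* bytes held in page fragments */
    };

    /* Assumed dispatch at 1711: a purely paged skb can be split by
     * shuffling page fragments; any linear bytes force the general
     * tcp_fragment() path. */
    static const char *pick_split(const struct fake_skb *skb)
    {
        if (skb->len != skb->data_len)
            return "tcp_fragment (slow path)";
        return "tso_fragment fast split";
    }

    int main(void)
    {
        struct fake_skb linear = { .len = 3000, .data_len = 0 };
        struct fake_skb paged  = { .len = 3000, .data_len = 3000 };
        printf("%s\n%s\n", pick_split(&linear), pick_split(&paged));
        return 0;
    }
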
1878 int mss_now; in tcp_mtu_probe() local
1896 mss_now = tcp_current_mss(sk); in tcp_mtu_probe()
1975 tcp_set_skb_tso_segs(skb, mss_now); in tcp_mtu_probe()
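
tcp_mtu_probe() builds one oversized probe segment to search for the true path MTU between the icsk_mtup search bounds; the excerpt shows it reading the current MSS (1896) and re-sizing the remaining skb (1975). A sketch of midpoint probe sizing, an assumption about the probing strategy rather than a quote of the excerpt:

    #include <stdio.h>

    static int mtu_to_mss(int pmtu) { return pmtu - 40; } /* crude */

    /* Assumed probe sizing: try the midpoint of the unresolved MTU
     * range, so each successful or failed probe halves the search
     * interval. */
    static int probe_mss(int search_low, int search_high)
    {
        return mtu_to_mss((search_low + search_high) / 2);
    }

    int main(void)
    {
        /* Known-good 1024, optimistic 1500: probe an MSS of 1222. */
        printf("%d\n", probe_mss(1024, 1500));
        return 0;
    }
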
2020 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, in tcp_write_xmit() argument
2043 max_segs = tcp_tso_autosize(sk, mss_now); in tcp_write_xmit()
2047 tso_segs = tcp_init_tso_segs(skb, mss_now); in tcp_write_xmit()
2065 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
2069 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2080 limit = mss_now; in tcp_write_xmit()
2082 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2089 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2125 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
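
These hits outline tcp_write_xmit()'s main loop (declared at 68, defined at 2020): initialize TSO accounting per skb (2047), stop if the receiver's window (2065) or Nagle (2069) says so, pick a byte limit of one MSS (2080) or a TSO split point (2082), fragment when the skb exceeds it (2089), and update Minshall state after sending (2125). A compressed userspace sketch of just that control flow; every name in it is a stand-in:

    #include <stddef.h>
    #include <stdio.h>

    struct pkt { unsigned int len; };   /* stand-in for an skb */

    static unsigned int split_point(unsigned int len, unsigned int mss_now,
                                    unsigned int max_segs)
    {
        unsigned int max_len = mss_now * max_segs;
        return len < max_len ? len - len % mss_now : max_len;
    }

    static void write_xmit(const struct pkt *q, size_t n,
                           unsigned int mss_now, unsigned int wnd,
                           unsigned int max_segs)
    {
        for (size_t i = 0; i < n; i++) {
            unsigned int limit;

            if (q[i].len > wnd)         /* window test, as at 2065 */
                break;
            /* Nagle test (2069) omitted for brevity. */
            limit = q[i].len <= mss_now
                  ? mss_now                                   /* 2080 */
                  : split_point(q[i].len, mss_now, max_segs); /* 2082 */
            printf("send %u of %u bytes\n",
                   q[i].len < limit ? q[i].len : limit, q[i].len);
            /* tso_fragment (2089), transmit, and the Minshall
             * update (2125) would follow here. */
        }
    }

    int main(void)
    {
        struct pkt q[] = { { 4000 }, { 1200 } };
        write_xmit(q, 2, 1448, 65535, 4);
        return 0;
    }
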
2306 void tcp_push_one(struct sock *sk, unsigned int mss_now) in tcp_push_one() argument
2310 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2312 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
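
tcp_push_one() forces exactly the head segment out with TCP_NAGLE_PUSH, and the BUG_ON at 2310 asserts the head skb exists and carries at least one full MSS. A sketch of that contract (the TCP_NAGLE_PUSH value here is illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define TCP_NAGLE_PUSH 4   /* illustrative; see include/net/tcp.h */

    /* tcp_push_one()'s contract: the head skb exists and carries at
     * least one full MSS (the BUG_ON at 2310), and Nagle is overridden
     * so the segment leaves immediately. */
    static void push_one(unsigned int head_len, unsigned int mss_now)
    {
        assert(head_len >= mss_now);
        printf("xmit 1 segment, nonagle=%d\n", TCP_NAGLE_PUSH);
    }

    int main(void)
    {
        push_one(1448, 1448);
        return 0;
    }
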