Lines matching refs: skb (net/ipv4/tcp_output.c)

Each entry shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks how skb is bound in that function. Short annotations with self-contained userspace C sketches follow selected clusters.

72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)  in tcp_event_new_data_sent()  argument
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
88 tcp_skb_pcount(skb)); in tcp_event_new_data_sent()
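
The cluster above (lines 72-88) is the post-transmit bookkeeping: once an skb leaves the send head, snd_nxt advances to its end_seq and packets_out grows by its segment count. A minimal userspace sketch of just that arithmetic; tp_model and event_new_data_sent are hypothetical names standing in for the kernel structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of tcp_event_new_data_sent(): snd_nxt jumps
     * to the skb's end_seq, packets_out grows by its pcount. */
    struct tp_model { uint32_t snd_nxt; uint32_t packets_out; };

    static void event_new_data_sent(struct tp_model *tp,
                                    uint32_t end_seq, uint32_t pcount)
    {
        tp->snd_nxt = end_seq;       /* next unsent byte */
        tp->packets_out += pcount;   /* segments now in flight */
    }

    int main(void)
    {
        struct tp_model tp = { 1000, 0 };
        event_new_data_sent(&tp, 1000 + 2920, 2); /* two 1460-byte segments */
        printf("snd_nxt=%u packets_out=%u\n",
               (unsigned)tp.snd_nxt, (unsigned)tp.packets_out);
        return 0;
    }
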
315 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
319 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
321 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
327 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
343 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
350 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_clear_syn() argument
356 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); in tcp_ecn_clear_syn()
369 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
376 if (skb->len != tcp_header_len && in tcp_ecn_send()
377 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
381 tcp_hdr(skb)->cwr = 1; in tcp_ecn_send()
382 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
389 tcp_hdr(skb)->ece = 1; in tcp_ecn_send()
396 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) in tcp_init_nondata_skb() argument
398 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
399 skb->csum = 0; in tcp_init_nondata_skb()
401 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
402 TCP_SKB_CB(skb)->sacked = 0; in tcp_init_nondata_skb()
404 tcp_skb_pcount_set(skb, 1); in tcp_init_nondata_skb()
406 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
409 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
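
tcp_init_nondata_skb() (lines 396-409) stamps a freshly allocated control skb. The two kernel lines between 406 and 409 do not contain "skb" and so are omitted from this listing; they increment seq when flags carry SYN or FIN, since each of those occupies exactly one sequence number while a pure ACK spans zero. A self-contained sketch of that rule, with kernel flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define TCPHDR_FIN 0x01
    #define TCPHDR_SYN 0x02
    #define TCPHDR_ACK 0x10

    /* Sketch of the seq/end_seq rule in tcp_init_nondata_skb(). */
    static uint32_t nondata_end_seq(uint32_t seq, uint8_t flags)
    {
        if (flags & (TCPHDR_SYN | TCPHDR_FIN))
            seq++;    /* SYN and FIN each consume one sequence number */
        return seq;
    }

    int main(void)
    {
        printf("ACK    : %u\n", (unsigned)nondata_end_seq(100, TCPHDR_ACK));               /* 100 */
        printf("SYN    : %u\n", (unsigned)nondata_end_seq(100, TCPHDR_SYN));               /* 101 */
        printf("FIN|ACK: %u\n", (unsigned)nondata_end_seq(100, TCPHDR_FIN | TCPHDR_ACK));  /* 101 */
        return 0;
    }
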
545 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
577 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
612 unsigned int mss, struct sk_buff *skb, in tcp_synack_options() argument
645 opts->tsval = tcp_skb_timestamp(skb); in tcp_synack_options()
673 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
695 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
854 void tcp_wfree(struct sk_buff *skb) in tcp_wfree() argument
856 struct sock *sk = skb->sk; in tcp_wfree()
863 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc); in tcp_wfree()
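
tcp_wfree() is the destructor installed on data skbs (see line 955 below). Line 863 subtracts truesize - 1 rather than the full truesize, deliberately leaving one unit charged to sk_wmem_alloc so the socket cannot be released while the destructor is still running. A toy C11 illustration of that keep-one pattern, not the kernel's own atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Toy model of the keep-one-reference trick at line 863:
     * releasing truesize - 1 leaves the counter at 1, pinning the
     * socket until the final reference is dropped elsewhere. */
    int main(void)
    {
        atomic_int wmem = 4096;   /* one skb of truesize 4096 charged */
        int truesize = 4096;

        int after = atomic_fetch_sub(&wmem, truesize - 1) - (truesize - 1);
        printf("wmem after tcp_wfree-style release: %d\n", after); /* 1, not 0 */
        return 0;
    }
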
903 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
916 BUG_ON(!skb || !tcp_skb_pcount(skb)); in tcp_transmit_skb()
919 skb_mstamp_get(&skb->skb_mstamp); in tcp_transmit_skb()
921 if (unlikely(skb_cloned(skb))) in tcp_transmit_skb()
922 skb = pskb_copy(skb, gfp_mask); in tcp_transmit_skb()
924 skb = skb_clone(skb, gfp_mask); in tcp_transmit_skb()
925 if (unlikely(!skb)) in tcp_transmit_skb()
931 tcb = TCP_SKB_CB(skb); in tcp_transmit_skb()
935 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in tcp_transmit_skb()
937 tcp_options_size = tcp_established_options(sk, skb, &opts, in tcp_transmit_skb()
948 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in tcp_transmit_skb()
950 skb_push(skb, tcp_header_size); in tcp_transmit_skb()
951 skb_reset_transport_header(skb); in tcp_transmit_skb()
953 skb_orphan(skb); in tcp_transmit_skb()
954 skb->sk = sk; in tcp_transmit_skb()
955 skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree; in tcp_transmit_skb()
956 skb_set_hash_from_sk(skb, sk); in tcp_transmit_skb()
957 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in tcp_transmit_skb()
960 th = tcp_hdr(skb); in tcp_transmit_skb()
991 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in tcp_transmit_skb()
993 tcp_ecn_send(sk, skb, tcp_header_size); in tcp_transmit_skb()
1000 md5, sk, skb); in tcp_transmit_skb()
1004 icsk->icsk_af_ops->send_check(sk, skb); in tcp_transmit_skb()
1007 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); in tcp_transmit_skb()
1009 if (skb->len != tcp_header_size) in tcp_transmit_skb()
1014 tcp_skb_pcount(skb)); in tcp_transmit_skb()
1016 tp->segs_out += tcp_skb_pcount(skb); in tcp_transmit_skb()
1018 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in tcp_transmit_skb()
1019 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in tcp_transmit_skb()
1022 skb->tstamp.tv64 = 0; in tcp_transmit_skb()
1025 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in tcp_transmit_skb()
1028 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in tcp_transmit_skb()
1043 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1048 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
1049 __skb_header_release(skb); in tcp_queue_skb()
1050 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1051 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
1052 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1056 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) in tcp_set_skb_tso_segs() argument
1058 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { in tcp_set_skb_tso_segs()
1062 tcp_skb_pcount_set(skb, 1); in tcp_set_skb_tso_segs()
1063 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1065 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
1066 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
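
tcp_set_skb_tso_segs() (lines 1058-1066) decides how many wire segments a TSO skb represents: one for small or non-offloadable skbs, otherwise ceil(len / mss), with tcp_gso_size recording the MSS used. The rounding macro below is the kernel's DIV_ROUND_UP; the surrounding program is an illustrative sketch:

    #include <stdio.h>

    /* The pcount computation of line 1065: a 4000-byte payload at
     * MSS 1460 counts as ceil(4000 / 1460) = 3 segments. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int len = 4000, mss = 1460;
        unsigned int pcount   = (len <= mss) ? 1 : DIV_ROUND_UP(len, mss);
        unsigned int gso_size = (len <= mss) ? 0 : mss;

        printf("pcount=%u gso_size=%u\n", pcount, gso_size);
        return 0;
    }
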
1073 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, in tcp_adjust_fackets_out() argument
1081 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) in tcp_adjust_fackets_out()
1088 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1094 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1096 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1098 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1105 tcp_adjust_fackets_out(sk, skb, decr); in tcp_adjust_pcount()
1108 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1109 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) in tcp_adjust_pcount()
1115 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) in tcp_fragment_tstamp() argument
1117 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_fragment_tstamp()
1135 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, in tcp_fragment() argument
1144 if (WARN_ON(len > skb->len)) in tcp_fragment()
1147 nsize = skb_headlen(skb) - len; in tcp_fragment()
1151 if (skb_unclone(skb, gfp)) in tcp_fragment()
1161 nlen = skb->len - len - nsize; in tcp_fragment()
1163 skb->truesize -= nlen; in tcp_fragment()
1166 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1167 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1168 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1171 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1172 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1174 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1176 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { in tcp_fragment()
1178 buff->csum = csum_partial_copy_nocheck(skb->data + len, in tcp_fragment()
1182 skb_trim(skb, len); in tcp_fragment()
1184 skb->csum = csum_block_sub(skb->csum, buff->csum, len); in tcp_fragment()
1186 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_fragment()
1187 skb_split(skb, buff, len); in tcp_fragment()
1190 buff->ip_summed = skb->ip_summed; in tcp_fragment()
1192 buff->tstamp = skb->tstamp; in tcp_fragment()
1193 tcp_fragment_tstamp(skb, buff); in tcp_fragment()
1195 old_factor = tcp_skb_pcount(skb); in tcp_fragment()
1198 tcp_set_skb_tso_segs(skb, mss_now); in tcp_fragment()
1205 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1209 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1214 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
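
tcp_fragment() splits one queued skb into two at offset len. Lines 1166-1168 are the heart of it: the new buff covers the tail of the sequence range and the original skb is shortened to end where buff begins, so the halves stay adjacent and non-overlapping. A minimal model of that bookkeeping; struct seg and split_at are illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of lines 1166-1168: split [seq, end_seq) at offset len. */
    struct seg { uint32_t seq, end_seq; };

    static struct seg split_at(struct seg *skb, uint32_t len)
    {
        struct seg buff;

        buff.seq     = skb->seq + len; /* tail starts len bytes in */
        buff.end_seq = skb->end_seq;   /* tail inherits the old end */
        skb->end_seq = buff.seq;       /* head now stops at the cut */
        return buff;
    }

    int main(void)
    {
        struct seg skb  = { 1000, 4000 };
        struct seg buff = split_at(&skb, 1200);

        printf("skb=[%u,%u) buff=[%u,%u)\n",
               (unsigned)skb.seq, (unsigned)skb.end_seq,
               (unsigned)buff.seq, (unsigned)buff.end_seq);
        return 0;
    }
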
1223 static void __pskb_trim_head(struct sk_buff *skb, int len) in __pskb_trim_head() argument
1228 eat = min_t(int, len, skb_headlen(skb)); in __pskb_trim_head()
1230 __skb_pull(skb, eat); in __pskb_trim_head()
1237 shinfo = skb_shinfo(skb); in __pskb_trim_head()
1242 skb_frag_unref(skb, i); in __pskb_trim_head()
1256 skb_reset_tail_pointer(skb); in __pskb_trim_head()
1257 skb->data_len -= len; in __pskb_trim_head()
1258 skb->len = skb->data_len; in __pskb_trim_head()
1262 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1264 if (skb_unclone(skb, GFP_ATOMIC)) in tcp_trim_head()
1267 __pskb_trim_head(skb, len); in tcp_trim_head()
1269 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1270 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_trim_head()
1272 skb->truesize -= len; in tcp_trim_head()
1278 if (tcp_skb_pcount(skb) > 1) in tcp_trim_head()
1279 tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); in tcp_trim_head()
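
tcp_trim_head() drops bytes from the front of an skb that the peer has already acked, advancing seq by the trimmed length (line 1269); __tcp_retransmit_skb() uses it at line 2580 with len = snd_una - seq. The "already acked" test must be wrap-safe, which the kernel does via signed 32-bit subtraction. A sketch of both pieces:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe before() plus the head-trim arithmetic of lines 1269
     * and 2580: retransmit only the unacked tail of the skb. */
    static int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    int main(void)
    {
        uint32_t seq = 5000, end_seq = 8000, snd_una = 6200;

        if (before(seq, snd_una)) {
            uint32_t trim = snd_una - seq;
            seq += trim;               /* tcp_trim_head()'s effect on seq */
            printf("retransmit [%u,%u): trimmed %u acked bytes\n",
                   (unsigned)seq, (unsigned)end_seq, (unsigned)trim);
        }
        return 0;
    }
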
1509 const struct sk_buff *skb) in tcp_minshall_update() argument
1511 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1512 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1552 const struct sk_buff *skb, in tcp_mss_split_point() argument
1560 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1563 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1566 needed = min(skb->len, window); in tcp_mss_split_point()
1586 const struct sk_buff *skb) in tcp_cwnd_test() argument
1591 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
1592 tcp_skb_pcount(skb) == 1) in tcp_cwnd_test()
1611 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) in tcp_init_tso_segs() argument
1613 int tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1615 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { in tcp_init_tso_segs()
1616 tcp_set_skb_tso_segs(skb, mss_now); in tcp_init_tso_segs()
1617 tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1626 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1639 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1642 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
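
tcp_nagle_test() (lines 1626-1642) gates small segments: urgent data and FINs always pass, and otherwise the decision falls to tcp_nagle_check(). The sketch below is a deliberately rough model of classic Nagle only, ignoring the Minshall refinement visible at tcp_minshall_update() above; names and signature are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Rough classic-Nagle model: send a sub-MSS segment only when
     * nothing unacked is in flight or TCP_NODELAY is set. */
    static bool nagle_allows_send(unsigned int len, unsigned int mss,
                                  unsigned int packets_out, bool nodelay)
    {
        if (len >= mss)              /* full-sized segments always go */
            return true;
        if (nodelay)                 /* TCP_NODELAY bypasses Nagle */
            return true;
        return packets_out == 0;     /* small segment: only if pipe is empty */
    }

    int main(void)
    {
        printf("in flight, small : %d\n", nagle_allows_send(100, 1460, 3, false)); /* 0 */
        printf("pipe empty, small: %d\n", nagle_allows_send(100, 1460, 0, false)); /* 1 */
        return 0;
    }
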
1650 const struct sk_buff *skb, in tcp_snd_wnd_test() argument
1653 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
1655 if (skb->len > cur_mss) in tcp_snd_wnd_test()
1656 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
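
tcp_snd_wnd_test() (lines 1650-1656) asks whether at least the first cur_mss bytes of the skb fit inside the receive window. Because sequence numbers wrap modulo 2^32, the test cannot use a plain less-than; the kernel's after() compares via signed subtraction. A self-contained demonstration across the wrap point:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe after(): end_seq here is numerically larger than
     * wnd_end, yet still falls inside the window across the wrap. */
    static int after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    int main(void)
    {
        uint32_t wnd_end = 10;               /* window edge just past the wrap */
        uint32_t end_seq = 0xfffffff0u + 8;  /* segment ends just before it */

        printf("fits in window: %s\n", !after(end_seq, wnd_end) ? "yes" : "no");
        return 0;
    }
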
1665 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, in tcp_snd_test() argument
1671 tcp_init_tso_segs(skb, cur_mss); in tcp_snd_test()
1673 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) in tcp_snd_test()
1676 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_snd_test()
1677 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) in tcp_snd_test()
1687 struct sk_buff *skb = tcp_send_head(sk); in tcp_may_send_now() local
1689 return skb && in tcp_may_send_now()
1690 tcp_snd_test(sk, skb, tcp_current_mss(sk), in tcp_may_send_now()
1691 (tcp_skb_is_last(sk, skb) ? in tcp_may_send_now()
1702 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1706 int nlen = skb->len - len; in tso_fragment()
1710 if (skb->len != skb->data_len) in tso_fragment()
1711 return tcp_fragment(sk, skb, len, mss_now, gfp); in tso_fragment()
1720 skb->truesize -= nlen; in tso_fragment()
1723 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
1724 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
1725 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
1728 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
1729 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
1735 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; in tso_fragment()
1736 skb_split(skb, buff, len); in tso_fragment()
1737 tcp_fragment_tstamp(skb, buff); in tso_fragment()
1740 tcp_set_skb_tso_segs(skb, mss_now); in tso_fragment()
1745 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
1755 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
1765 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_tso_should_defer()
1779 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); in tcp_tso_should_defer()
1781 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1793 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1825 if (cong_win < send_win && cong_win <= skb->len) in tcp_tso_should_defer()
1872 struct sk_buff *skb, *nskb, *next; in tcp_mtu_probe() local
1938 skb = tcp_send_head(sk); in tcp_mtu_probe()
1940 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
1941 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
1945 nskb->ip_summed = skb->ip_summed; in tcp_mtu_probe()
1947 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
1950 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
1951 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
1953 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); in tcp_mtu_probe()
1955 nskb->csum = skb_copy_and_csum_bits(skb, 0, in tcp_mtu_probe()
1959 if (skb->len <= copy) { in tcp_mtu_probe()
1962 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
1963 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
1964 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
1966 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
1968 if (!skb_shinfo(skb)->nr_frags) { in tcp_mtu_probe()
1969 skb_pull(skb, copy); in tcp_mtu_probe()
1970 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_mtu_probe()
1971 skb->csum = csum_partial(skb->data, in tcp_mtu_probe()
1972 skb->len, 0); in tcp_mtu_probe()
1974 __pskb_trim_head(skb, copy); in tcp_mtu_probe()
1975 tcp_set_skb_tso_segs(skb, mss_now); in tcp_mtu_probe()
1977 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2024 struct sk_buff *skb; in tcp_write_xmit() local
2044 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2047 tso_segs = tcp_init_tso_segs(skb, mss_now); in tcp_write_xmit()
2052 skb_mstamp_get(&skb->skb_mstamp); in tcp_write_xmit()
2056 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2065 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
2069 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2070 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2075 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2082 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2088 if (skb->len > limit && in tcp_write_xmit()
2089 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2102 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); in tcp_write_xmit()
2116 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2123 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2125 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2126 sent_pkts += tcp_skb_pcount(skb); in tcp_write_xmit()
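
Within tcp_write_xmit(), line 2102 is the TCP Small Queues cap: limit how much data may sit unsent in the qdisc and NIC queues to roughly one millisecond at the pacing rate (rate >> 10 is bytes per second divided by 1024), floored at two skbs' worth of truesize. A sketch of that control law; tsq_limit is a hypothetical name:

    #include <stdio.h>

    /* Illustrative model of the cap at line 2102. */
    static unsigned long tsq_limit(unsigned long truesize,
                                   unsigned long pacing_rate)
    {
        unsigned long limit = pacing_rate >> 10;  /* ~1 ms at the pacing rate */

        if (limit < 2 * truesize)                 /* never below two skbs */
            limit = 2 * truesize;
        return limit;
    }

    int main(void)
    {
        /* 100 Mbit/s pacing (12.5 MB/s) with 4 KB truesize skbs */
        printf("limit=%lu bytes\n", tsq_limit(4096, 12500000));
        return 0;
    }
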
2211 const struct sk_buff *skb) in skb_still_in_host_queue() argument
2213 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2227 struct sk_buff *skb; in tcp_send_loss_probe() local
2231 skb = tcp_send_head(sk); in tcp_send_loss_probe()
2232 if (skb) { in tcp_send_loss_probe()
2233 if (tcp_snd_wnd_test(tp, skb, mss)) { in tcp_send_loss_probe()
2240 skb = tcp_write_queue_prev(sk, skb); in tcp_send_loss_probe()
2242 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2250 if (WARN_ON(!skb)) in tcp_send_loss_probe()
2253 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2256 pcount = tcp_skb_pcount(skb); in tcp_send_loss_probe()
2260 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2261 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2264 skb = tcp_write_queue_next(sk, skb); in tcp_send_loss_probe()
2267 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) in tcp_send_loss_probe()
2270 if (__tcp_retransmit_skb(sk, skb)) in tcp_send_loss_probe()
2308 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one() local
2310 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2446 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2449 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); in tcp_collapse_retrans()
2452 skb_size = skb->len; in tcp_collapse_retrans()
2455 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); in tcp_collapse_retrans()
2457 tcp_highest_sack_combine(sk, next_skb, skb); in tcp_collapse_retrans()
2461 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), in tcp_collapse_retrans()
2465 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_collapse_retrans()
2467 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_collapse_retrans()
2468 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); in tcp_collapse_retrans()
2471 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
2474 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
2479 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
2484 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
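
tcp_collapse_retrans() merges a retransmit candidate with its successor to cut per-packet overhead. Lines 2471-2479 show the range and flag bookkeeping: the surviving skb inherits next_skb's end_seq and ORs in its tcp_flags, so a PSH or FIN on the merged tail is preserved. A minimal model; seg and collapse are illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the merge bookkeeping at lines 2471 and 2474. */
    struct seg { uint32_t seq, end_seq; uint8_t flags; };

    static void collapse(struct seg *skb, const struct seg *next_skb)
    {
        skb->end_seq = next_skb->end_seq; /* ranges are adjacent */
        skb->flags  |= next_skb->flags;   /* keep the tail's PSH/FIN */
    }

    int main(void)
    {
        struct seg a = { 1000, 1400, 0x10 };        /* ACK */
        struct seg b = { 1400, 1800, 0x10 | 0x01 }; /* ACK|FIN */

        collapse(&a, &b);
        printf("merged=[%u,%u) flags=0x%02x\n",
               (unsigned)a.seq, (unsigned)a.end_seq, (unsigned)a.flags);
        return 0;
    }
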
2492 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2494 if (tcp_skb_pcount(skb) > 1) in tcp_can_collapse()
2497 if (skb_shinfo(skb)->nr_frags != 0) in tcp_can_collapse()
2499 if (skb_cloned(skb)) in tcp_can_collapse()
2501 if (skb == tcp_send_head(sk)) in tcp_can_collapse()
2504 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
2517 struct sk_buff *skb = to, *tmp; in tcp_retrans_try_collapse() local
2522 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
2525 tcp_for_write_queue_from_safe(skb, tmp, sk) { in tcp_retrans_try_collapse()
2526 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2529 space -= skb->len; in tcp_retrans_try_collapse()
2541 if (skb->len > skb_availroom(to)) in tcp_retrans_try_collapse()
2544 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2555 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in __tcp_retransmit_skb() argument
2574 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
2577 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2578 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in __tcp_retransmit_skb()
2580 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2594 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2595 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2598 if (skb->len > cur_mss) { in __tcp_retransmit_skb()
2599 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) in __tcp_retransmit_skb()
2602 int oldpcount = tcp_skb_pcount(skb); in __tcp_retransmit_skb()
2605 if (skb_unclone(skb, GFP_ATOMIC)) in __tcp_retransmit_skb()
2607 tcp_init_tso_segs(skb, cur_mss); in __tcp_retransmit_skb()
2608 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); in __tcp_retransmit_skb()
2613 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
2614 tcp_ecn_clear_syn(sk, skb); in __tcp_retransmit_skb()
2616 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2626 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
2627 skb_headroom(skb) >= 0xFFFF)) { in __tcp_retransmit_skb()
2630 skb_mstamp_get(&skb->skb_mstamp); in __tcp_retransmit_skb()
2631 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); in __tcp_retransmit_skb()
2635 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
2639 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
2642 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
2649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_retransmit_skb() argument
2652 int err = __tcp_retransmit_skb(sk, skb); in tcp_retransmit_skb()
2656 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
2660 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
2661 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2665 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
2673 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2719 struct sk_buff *skb; in tcp_xmit_retransmit_queue() local
2732 skb = tp->retransmit_skb_hint; in tcp_xmit_retransmit_queue()
2733 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2737 skb = tcp_write_queue_head(sk); in tcp_xmit_retransmit_queue()
2741 tcp_for_write_queue_from(skb, sk) { in tcp_xmit_retransmit_queue()
2742 __u8 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
2744 if (skb == tcp_send_head(sk)) in tcp_xmit_retransmit_queue()
2748 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
2762 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) in tcp_xmit_retransmit_queue()
2766 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { in tcp_xmit_retransmit_queue()
2772 skb = hole; in tcp_xmit_retransmit_queue()
2780 hole = skb; in tcp_xmit_retransmit_queue()
2784 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2794 if (tcp_retransmit_skb(sk, skb)) in tcp_xmit_retransmit_queue()
2800 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
2802 if (skb == tcp_write_queue_head(sk)) in tcp_xmit_retransmit_queue()
2832 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); in tcp_send_fin() local
2856 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
2857 if (unlikely(!skb)) { in tcp_send_fin()
2862 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_fin()
2863 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
2865 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
2867 tcp_queue_skb(sk, skb); in tcp_send_fin()
2879 struct sk_buff *skb; in tcp_send_active_reset() local
2882 skb = alloc_skb(MAX_TCP_HEADER, priority); in tcp_send_active_reset()
2883 if (!skb) { in tcp_send_active_reset()
2889 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_active_reset()
2890 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
2892 skb_mstamp_get(&skb->skb_mstamp); in tcp_send_active_reset()
2894 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
2908 struct sk_buff *skb; in tcp_send_synack() local
2910 skb = tcp_write_queue_head(sk); in tcp_send_synack()
2911 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
2915 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
2916 if (skb_cloned(skb)) { in tcp_send_synack()
2917 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); in tcp_send_synack()
2920 tcp_unlink_write_queue(skb, sk); in tcp_send_synack()
2923 sk_wmem_free_skb(sk, skb); in tcp_send_synack()
2926 skb = nskb; in tcp_send_synack()
2929 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
2930 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
2932 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
2953 struct sk_buff *skb; in tcp_make_synack() local
2959 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); in tcp_make_synack()
2960 if (unlikely(!skb)) { in tcp_make_synack()
2965 skb_reserve(skb, MAX_TCP_HEADER); in tcp_make_synack()
2968 skb_set_owner_w(skb, req_to_sk(req)); in tcp_make_synack()
2974 skb_set_owner_w(skb, (struct sock *)sk); in tcp_make_synack()
2976 skb_dst_set(skb, dst); in tcp_make_synack()
2986 skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); in tcp_make_synack()
2989 skb_mstamp_get(&skb->skb_mstamp); in tcp_make_synack()
2995 skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); in tcp_make_synack()
2996 tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) + in tcp_make_synack()
2999 skb_push(skb, tcp_header_size); in tcp_make_synack()
3000 skb_reset_transport_header(skb); in tcp_make_synack()
3002 th = tcp_hdr(skb); in tcp_make_synack()
3012 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, in tcp_make_synack()
3015 th->seq = htonl(TCP_SKB_CB(skb)->seq); in tcp_make_synack()
3029 md5, req_to_sk(req), skb); in tcp_make_synack()
3034 skb->tstamp.tv64 = 0; in tcp_make_synack()
3035 return skb; in tcp_make_synack()
3130 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3133 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); in tcp_connect_queue_skb()
3135 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3136 __skb_header_release(skb); in tcp_connect_queue_skb()
3137 __tcp_add_write_queue_tail(sk, skb); in tcp_connect_queue_skb()
3138 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
3139 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3141 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3399 struct sk_buff *skb; in tcp_xmit_probe_skb() local
3402 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_xmit_probe_skb()
3403 if (!skb) in tcp_xmit_probe_skb()
3407 skb_reserve(skb, MAX_TCP_HEADER); in tcp_xmit_probe_skb()
3412 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
3413 skb_mstamp_get(&skb->skb_mstamp); in tcp_xmit_probe_skb()
3415 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); in tcp_xmit_probe_skb()
3430 struct sk_buff *skb; in tcp_write_wakeup() local
3435 skb = tcp_send_head(sk); in tcp_write_wakeup()
3436 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3439 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3441 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3442 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3448 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
3449 skb->len > mss) { in tcp_write_wakeup()
3451 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3452 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) in tcp_write_wakeup()
3454 } else if (!tcp_skb_pcount(skb)) in tcp_write_wakeup()
3455 tcp_set_skb_tso_segs(skb, mss); in tcp_write_wakeup()
3457 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3458 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3460 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()