Lines matching refs:skb in net/ipv4/tcp_output.c
72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
88 tcp_skb_pcount(skb)); in tcp_event_new_data_sent()
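
The assignments above are the whole of the new-data bookkeeping: the send head advances past the segment, snd_nxt moves to its end_seq, and packets_out grows by its pcount. A minimal standalone C sketch of that accounting, with illustrative stand-in types (seg, tcp_state) rather than the kernel's sk_buff/tcp_sock:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's TCP_SKB_CB / tcp_sock fields. */
struct seg { uint32_t seq, end_seq, pcount; };
struct tcp_state { uint32_t snd_nxt, packets_out; };

/* Mirrors the bookkeeping in tcp_event_new_data_sent(). */
static void event_new_data_sent(struct tcp_state *tp, const struct seg *s)
{
	tp->snd_nxt = s->end_seq;	/* next sequence number to send */
	tp->packets_out += s->pcount;	/* wire segments now in flight */
}

int main(void)
{
	struct tcp_state tp = { .snd_nxt = 1000, .packets_out = 0 };
	struct seg s = { .seq = 1000, .end_seq = 2448, .pcount = 1 };

	event_new_data_sent(&tp, &s);
	printf("snd_nxt=%u packets_out=%u\n",
	       (unsigned)tp.snd_nxt, (unsigned)tp.packets_out);
	return 0;
}
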
318 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
322 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
324 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
330 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
346 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
367 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
374 if (skb->len != tcp_header_len && in tcp_ecn_send()
375 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
379 tcp_hdr(skb)->cwr = 1; in tcp_ecn_send()
380 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
387 tcp_hdr(skb)->ece = 1; in tcp_ecn_send()
394 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) in tcp_init_nondata_skb() argument
396 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_init_nondata_skb()
398 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
399 skb->csum = 0; in tcp_init_nondata_skb()
401 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
402 TCP_SKB_CB(skb)->sacked = 0; in tcp_init_nondata_skb()
404 tcp_skb_pcount_set(skb, 1); in tcp_init_nondata_skb()
408 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
411 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
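
In the kernel source, the lines elided between 408 and 411 (they do not mention skb, so this listing omits them) increment seq by one when the flags contain SYN or FIN, since each of those consumes one unit of sequence space; a pure ACK therefore ends up with end_seq == seq. A small standalone sketch of that rule, with flag values copied from the kernel's TCPHDR_* constants:

#include <stdint.h>
#include <stdio.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_ACK 0x10

/* SYN and FIN each occupy one unit of sequence space; a data-less ACK
 * occupies none, so its end_seq equals its seq. */
static uint32_t nondata_end_seq(uint32_t seq, uint8_t flags)
{
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	return seq;
}

int main(void)
{
	printf("SYN: end_seq=%u\n", (unsigned)nondata_end_seq(100, TCPHDR_SYN)); /* 101 */
	printf("ACK: end_seq=%u\n", (unsigned)nondata_end_seq(100, TCPHDR_ACK)); /* 100 */
	return 0;
}
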
547 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
579 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
615 unsigned int mss, struct sk_buff *skb, in tcp_synack_options() argument
648 opts->tsval = tcp_skb_timestamp(skb); in tcp_synack_options()
676 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
698 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
857 void tcp_wfree(struct sk_buff *skb) in tcp_wfree() argument
859 struct sock *sk = skb->sk; in tcp_wfree()
866 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc); in tcp_wfree()
906 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
919 BUG_ON(!skb || !tcp_skb_pcount(skb)); in tcp_transmit_skb()
922 skb_mstamp_get(&skb->skb_mstamp); in tcp_transmit_skb()
924 if (unlikely(skb_cloned(skb))) in tcp_transmit_skb()
925 skb = pskb_copy(skb, gfp_mask); in tcp_transmit_skb()
927 skb = skb_clone(skb, gfp_mask); in tcp_transmit_skb()
928 if (unlikely(!skb)) in tcp_transmit_skb()
934 tcb = TCP_SKB_CB(skb); in tcp_transmit_skb()
938 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in tcp_transmit_skb()
940 tcp_options_size = tcp_established_options(sk, skb, &opts, in tcp_transmit_skb()
954 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in tcp_transmit_skb()
956 skb_push(skb, tcp_header_size); in tcp_transmit_skb()
957 skb_reset_transport_header(skb); in tcp_transmit_skb()
959 skb_orphan(skb); in tcp_transmit_skb()
960 skb->sk = sk; in tcp_transmit_skb()
961 skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree; in tcp_transmit_skb()
962 skb_set_hash_from_sk(skb, sk); in tcp_transmit_skb()
963 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in tcp_transmit_skb()
966 th = tcp_hdr(skb); in tcp_transmit_skb()
998 tcp_ecn_send(sk, skb, tcp_header_size); in tcp_transmit_skb()
1005 md5, sk, skb); in tcp_transmit_skb()
1009 icsk->icsk_af_ops->send_check(sk, skb); in tcp_transmit_skb()
1012 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); in tcp_transmit_skb()
1014 if (skb->len != tcp_header_size) in tcp_transmit_skb()
1019 tcp_skb_pcount(skb)); in tcp_transmit_skb()
1022 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in tcp_transmit_skb()
1025 skb->tstamp.tv64 = 0; in tcp_transmit_skb()
1028 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in tcp_transmit_skb()
1031 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in tcp_transmit_skb()
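
Lines 924-928 encode the transmit-side ownership rule: the write queue keeps the original skb for possible retransmission, so tcp_transmit_skb() normally hands skb_clone() output down the stack, falling back to pskb_copy() when the skb is already cloned and its header can no longer be modified in place. A toy userspace model of just that decision (struct toy_skb and pick_tx_buffer() are invented for illustration, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: 'cloned' stands for skb_cloned(), i.e. the data is
 * already shared with another holder (e.g. a prior transmit still
 * sitting in a device queue). */
struct toy_skb { bool cloned; };

static const char *pick_tx_buffer(const struct toy_skb *skb)
{
	if (skb->cloned)
		return "pskb_copy: private header, data copied as needed";
	return "skb_clone: cheap reference, data shared with the queue";
}

int main(void)
{
	struct toy_skb fresh = { .cloned = false };
	struct toy_skb retx  = { .cloned = true };

	printf("first transmit: %s\n", pick_tx_buffer(&fresh));
	printf("retransmit:     %s\n", pick_tx_buffer(&retx));
	return 0;
}
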
1046 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1051 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
1052 __skb_header_release(skb); in tcp_queue_skb()
1053 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1054 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
1055 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1059 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_set_skb_tso_segs() argument
1062 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_set_skb_tso_segs()
1065 WARN_ON_ONCE(skb_cloned(skb)); in tcp_set_skb_tso_segs()
1067 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { in tcp_set_skb_tso_segs()
1071 tcp_skb_pcount_set(skb, 1); in tcp_set_skb_tso_segs()
1075 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
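
Line 1075 is the heart of TSO segment accounting: a super-packet counts as ceil(len / mss) wire segments. A standalone demonstration of the kernel's DIV_ROUND_UP arithmetic:

#include <stdio.h>

/* The kernel's DIV_ROUND_UP: ceiling division in integer arithmetic. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mss = 1448;
	unsigned int lens[] = { 1000, 1448, 1449, 64000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%5u -> pcount=%u\n", lens[i],
		       DIV_ROUND_UP(lens[i], mss)); /* 1, 1, 2, 45 */
	return 0;
}
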
1084 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, in tcp_adjust_fackets_out() argument
1092 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) in tcp_adjust_fackets_out()
1099 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1105 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1107 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1109 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1116 tcp_adjust_fackets_out(sk, skb, decr); in tcp_adjust_pcount()
1119 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1120 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) in tcp_adjust_pcount()
1126 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) in tcp_fragment_tstamp() argument
1128 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_fragment_tstamp()
1146 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, in tcp_fragment() argument
1155 if (WARN_ON(len > skb->len)) in tcp_fragment()
1158 nsize = skb_headlen(skb) - len; in tcp_fragment()
1162 if (skb_unclone(skb, gfp)) in tcp_fragment()
1172 nlen = skb->len - len - nsize; in tcp_fragment()
1174 skb->truesize -= nlen; in tcp_fragment()
1177 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1178 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1179 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1182 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1183 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1185 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1187 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { in tcp_fragment()
1189 buff->csum = csum_partial_copy_nocheck(skb->data + len, in tcp_fragment()
1193 skb_trim(skb, len); in tcp_fragment()
1195 skb->csum = csum_block_sub(skb->csum, buff->csum, len); in tcp_fragment()
1197 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_fragment()
1198 skb_split(skb, buff, len); in tcp_fragment()
1201 buff->ip_summed = skb->ip_summed; in tcp_fragment()
1203 buff->tstamp = skb->tstamp; in tcp_fragment()
1204 tcp_fragment_tstamp(skb, buff); in tcp_fragment()
1206 old_factor = tcp_skb_pcount(skb); in tcp_fragment()
1209 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_fragment()
1216 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1220 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1225 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
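
Lines 1177-1179 are the sequence-space half of tcp_fragment(): the new buffer takes [seq + len, end_seq) and the original keeps the first len bytes. A standalone sketch of the split (struct range is illustrative):

#include <stdint.h>
#include <stdio.h>

struct range { uint32_t seq, end_seq; };

/* Mirrors lines 1177-1179: split [seq, end_seq) at offset len. The tail
 * inherits the old end_seq; the head keeps the first len bytes. Adding
 * an offset to seq stays correct across 32-bit wraparound. */
static struct range split_at(struct range *head, uint32_t len)
{
	struct range tail = { head->seq + len, head->end_seq };

	head->end_seq = tail.seq;
	return tail;
}

int main(void)
{
	struct range head = { 1000, 3896 };	/* 2896 bytes: two MSS of 1448 */
	struct range tail = split_at(&head, 1448);

	printf("head [%u,%u) tail [%u,%u)\n",
	       (unsigned)head.seq, (unsigned)head.end_seq,
	       (unsigned)tail.seq, (unsigned)tail.end_seq);
	return 0;
}
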
1234 static void __pskb_trim_head(struct sk_buff *skb, int len) in __pskb_trim_head() argument
1239 eat = min_t(int, len, skb_headlen(skb)); in __pskb_trim_head()
1241 __skb_pull(skb, eat); in __pskb_trim_head()
1248 shinfo = skb_shinfo(skb); in __pskb_trim_head()
1253 skb_frag_unref(skb, i); in __pskb_trim_head()
1267 skb_reset_tail_pointer(skb); in __pskb_trim_head()
1268 skb->data_len -= len; in __pskb_trim_head()
1269 skb->len = skb->data_len; in __pskb_trim_head()
1273 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1275 if (skb_unclone(skb, GFP_ATOMIC)) in tcp_trim_head()
1278 __pskb_trim_head(skb, len); in tcp_trim_head()
1280 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1281 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_trim_head()
1283 skb->truesize -= len; in tcp_trim_head()
1289 if (tcp_skb_pcount(skb) > 1) in tcp_trim_head()
1290 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); in tcp_trim_head()
1520 const struct sk_buff *skb) in tcp_minshall_update() argument
1522 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1523 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1563 const struct sk_buff *skb, in tcp_mss_split_point() argument
1571 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1574 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1577 needed = min(skb->len, window); in tcp_mss_split_point()
1597 const struct sk_buff *skb) in tcp_cwnd_test() argument
1602 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
1603 tcp_skb_pcount(skb) == 1) in tcp_cwnd_test()
1622 static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_init_tso_segs() argument
1625 int tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1627 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { in tcp_init_tso_segs()
1628 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_init_tso_segs()
1629 tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1638 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1651 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1654 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
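
Lines 1522-1523 together with the Nagle test at line 1654 implement Minshall's variant of Nagle: snd_sml remembers the end of the most recent sub-MSS segment sent, and a new small segment is allowed only once no small segment remains unacknowledged. A standalone sketch, paraphrasing tcp_minshall_check() (which never touches an skb and so does not appear in this listing):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe sequence comparisons, as in the kernel. */
static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
#define after(s2, s1) before(s1, s2)

struct tp { uint32_t snd_una, snd_nxt, snd_sml; };

/* Minshall's rule: permit a new sub-MSS segment only if no previously
 * sent sub-MSS segment is still unacknowledged. */
static bool small_segment_allowed(const struct tp *tp)
{
	bool small_unacked = after(tp->snd_sml, tp->snd_una) &&
			     !after(tp->snd_sml, tp->snd_nxt);
	return !small_unacked;
}

int main(void)
{
	struct tp tp = { .snd_una = 1000, .snd_nxt = 1500, .snd_sml = 1500 };

	printf("allowed: %d\n", small_segment_allowed(&tp)); /* 0: small seg in flight */
	tp.snd_una = 1500;	/* the small segment has been ACKed */
	printf("allowed: %d\n", small_segment_allowed(&tp)); /* 1 */
	return 0;
}
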
1662 const struct sk_buff *skb, in tcp_snd_wnd_test() argument
1665 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
1667 if (skb->len > cur_mss) in tcp_snd_wnd_test()
1668 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
1677 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, in tcp_snd_test() argument
1683 tcp_init_tso_segs(sk, skb, cur_mss); in tcp_snd_test()
1685 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) in tcp_snd_test()
1688 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_snd_test()
1689 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) in tcp_snd_test()
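
Every window comparison in these tests (lines 1665-1668 and elsewhere) relies on the kernel's before()/after() helpers, which stay correct across 32-bit sequence wrap by comparing the signed difference. A standalone sketch of tcp_snd_wnd_test() built on them:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit sequence numbers wrap, so compare via the signed difference. */
static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
#define after(s2, s1) before(s1, s2)

/* Paraphrase of tcp_snd_wnd_test(): the segment fits if its end does not
 * pass the right edge of the send window. For an over-MSS skb only the
 * first MSS-sized piece is tested, since the skb can be fragmented. */
static bool snd_wnd_test(uint32_t seq, uint32_t end_seq, uint32_t len,
			 uint32_t cur_mss, uint32_t wnd_end)
{
	if (len > cur_mss)
		end_seq = seq + cur_mss;
	return !after(end_seq, wnd_end);
}

int main(void)
{
	/* Correct even across the 2^32 wrap: seq is numerically huge and
	 * wnd_end tiny, yet the segment is recognized as in-window. */
	uint32_t seq = 0xfffffff0u, wnd_end = 0x00001000u;

	printf("fits: %d\n",
	       snd_wnd_test(seq, seq + 1448, 1448, 1448, wnd_end)); /* 1 */
	return 0;
}
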
1699 struct sk_buff *skb = tcp_send_head(sk); in tcp_may_send_now() local
1701 return skb && in tcp_may_send_now()
1702 tcp_snd_test(sk, skb, tcp_current_mss(sk), in tcp_may_send_now()
1703 (tcp_skb_is_last(sk, skb) ? in tcp_may_send_now()
1714 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1718 int nlen = skb->len - len; in tso_fragment()
1722 if (skb->len != skb->data_len) in tso_fragment()
1723 return tcp_fragment(sk, skb, len, mss_now, gfp); in tso_fragment()
1732 skb->truesize -= nlen; in tso_fragment()
1735 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
1736 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
1737 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
1740 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
1741 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
1747 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; in tso_fragment()
1748 skb_split(skb, buff, len); in tso_fragment()
1749 tcp_fragment_tstamp(skb, buff); in tso_fragment()
1752 tcp_set_skb_tso_segs(sk, skb, mss_now); in tso_fragment()
1757 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
1767 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
1777 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_tso_should_defer()
1791 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); in tcp_tso_should_defer()
1793 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1805 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1837 if (cong_win < send_win && cong_win < skb->len) in tcp_tso_should_defer()
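
Lines 1793-1837 weigh two ceilings against the skb: the receiver's window and the congestion window. Deferral only pays off when the congestion window is the binding constraint, because incoming ACKs will soon enlarge it and allow a larger TSO burst. A deliberately simplified sketch of that decision (the kernel adds further conditions: deferral deadlines, whether the skb is the queue tail, Nagle state):

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Simplified shape of tcp_tso_should_defer(): if the skb already fits in
 * min(send window, congestion window), send now; defer only when the
 * congestion window is what is cutting the segment short. */
static int should_defer(uint32_t skb_len, uint32_t send_win, uint32_t cong_win)
{
	if (min_u32(send_win, cong_win) >= skb_len)
		return 0;			/* fits entirely: send now */
	if (cong_win < send_win && cong_win < skb_len)
		return 1;			/* cwnd-limited: wait for ACKs */
	return 0;
}

int main(void)
{
	printf("%d\n", should_defer(64000, 128000, 20000)); /* 1: cwnd-limited */
	printf("%d\n", should_defer(64000, 128000, 90000)); /* 0: fits */
	return 0;
}
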
1884 struct sk_buff *skb, *nskb, *next; in tcp_mtu_probe() local
1950 skb = tcp_send_head(sk); in tcp_mtu_probe()
1952 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
1953 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
1957 nskb->ip_summed = skb->ip_summed; in tcp_mtu_probe()
1959 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
1962 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
1963 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
1965 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); in tcp_mtu_probe()
1967 nskb->csum = skb_copy_and_csum_bits(skb, 0, in tcp_mtu_probe()
1971 if (skb->len <= copy) { in tcp_mtu_probe()
1974 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
1975 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
1976 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
1978 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
1980 if (!skb_shinfo(skb)->nr_frags) { in tcp_mtu_probe()
1981 skb_pull(skb, copy); in tcp_mtu_probe()
1982 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_mtu_probe()
1983 skb->csum = csum_partial(skb->data, in tcp_mtu_probe()
1984 skb->len, 0); in tcp_mtu_probe()
1986 __pskb_trim_head(skb, copy); in tcp_mtu_probe()
1987 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_mtu_probe()
1989 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2036 struct sk_buff *skb; in tcp_write_xmit() local
2056 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2059 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); in tcp_write_xmit()
2064 skb_mstamp_get(&skb->skb_mstamp); in tcp_write_xmit()
2068 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2078 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
2082 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2083 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2088 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2095 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2101 if (skb->len > limit && in tcp_write_xmit()
2102 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2115 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); in tcp_write_xmit()
2129 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2136 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2138 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2139 sent_pkts += tcp_skb_pcount(skb); in tcp_write_xmit()
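
The limit at line 2115 is the small-queues (TSQ) cap: allow roughly one millisecond of data at the pacing rate to sit in lower layers (sk_pacing_rate is in bytes per second, and >> 10 approximates division by 1000), floored at two skb truesizes so very slow flows still make progress. The arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* The TSQ cap from line 2115: ~1 ms of data at the pacing rate
 * (bytes/sec >> 10 is roughly a per-millisecond amount), with a floor
 * of two skb truesizes. */
static uint64_t tsq_limit(uint64_t pacing_rate /* bytes/sec */,
			  uint64_t skb_truesize)
{
	return max_u64(2 * skb_truesize, pacing_rate >> 10);
}

int main(void)
{
	/* 10 Gbit/s is ~1.25e9 bytes/sec -> ~1.2 MB allowed below TCP. */
	printf("%llu bytes\n",
	       (unsigned long long)tsq_limit(1250000000ULL, 2048));
	return 0;
}
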
2222 const struct sk_buff *skb) in skb_still_in_host_queue() argument
2224 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2238 struct sk_buff *skb; in tcp_send_loss_probe() local
2253 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2254 if (WARN_ON(!skb)) in tcp_send_loss_probe()
2257 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2260 pcount = tcp_skb_pcount(skb); in tcp_send_loss_probe()
2264 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2265 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2268 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2271 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) in tcp_send_loss_probe()
2274 err = __tcp_retransmit_skb(sk, skb); in tcp_send_loss_probe()
2314 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one() local
2316 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2452 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2455 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); in tcp_collapse_retrans()
2458 skb_size = skb->len; in tcp_collapse_retrans()
2461 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); in tcp_collapse_retrans()
2463 tcp_highest_sack_combine(sk, next_skb, skb); in tcp_collapse_retrans()
2467 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), in tcp_collapse_retrans()
2471 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_collapse_retrans()
2473 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_collapse_retrans()
2474 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); in tcp_collapse_retrans()
2477 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
2480 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
2485 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
2490 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
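
Lines 2477 and 2480 show what "collapsing" means for retransmission: the surviving skb absorbs its successor's payload, so it inherits the successor's end_seq and control flags. A standalone sketch of the sequence/flag merge (struct seg is illustrative; the flag values match the kernel's TCPHDR_* constants):

#include <stdint.h>
#include <stdio.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10

struct seg { uint32_t seq, end_seq; uint8_t flags; };

/* Shape of the merge in tcp_collapse_retrans(): the survivor ends where
 * its successor ended and inherits the successor's control flags. */
static void collapse(struct seg *skb, const struct seg *next)
{
	skb->end_seq = next->end_seq;
	skb->flags |= next->flags;
}

int main(void)
{
	struct seg a = { 1000, 1100, TCPHDR_PSH | TCPHDR_ACK };
	struct seg b = { 1100, 1300, TCPHDR_FIN | TCPHDR_ACK };

	collapse(&a, &b);
	printf("[%u,%u) flags=0x%02x\n",
	       (unsigned)a.seq, (unsigned)a.end_seq, a.flags);
	return 0;
}
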
2498 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2500 if (tcp_skb_pcount(skb) > 1) in tcp_can_collapse()
2503 if (skb_shinfo(skb)->nr_frags != 0) in tcp_can_collapse()
2505 if (skb_cloned(skb)) in tcp_can_collapse()
2507 if (skb == tcp_send_head(sk)) in tcp_can_collapse()
2510 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
2523 struct sk_buff *skb = to, *tmp; in tcp_retrans_try_collapse() local
2528 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
2531 tcp_for_write_queue_from_safe(skb, tmp, sk) { in tcp_retrans_try_collapse()
2532 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2535 space -= skb->len; in tcp_retrans_try_collapse()
2547 if (skb->len > skb_availroom(to)) in tcp_retrans_try_collapse()
2550 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2561 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in __tcp_retransmit_skb() argument
2580 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
2583 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2584 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in __tcp_retransmit_skb()
2586 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2600 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2601 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2604 if (skb->len > cur_mss) { in __tcp_retransmit_skb()
2605 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) in __tcp_retransmit_skb()
2608 int oldpcount = tcp_skb_pcount(skb); in __tcp_retransmit_skb()
2611 if (skb_unclone(skb, GFP_ATOMIC)) in __tcp_retransmit_skb()
2613 tcp_init_tso_segs(sk, skb, cur_mss); in __tcp_retransmit_skb()
2614 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); in __tcp_retransmit_skb()
2618 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2628 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
2629 skb_headroom(skb) >= 0xFFFF)) { in __tcp_retransmit_skb()
2630 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, in __tcp_retransmit_skb()
2635 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
2639 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
2642 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
2649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_retransmit_skb() argument
2652 int err = __tcp_retransmit_skb(sk, skb); in tcp_retransmit_skb()
2656 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
2662 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
2663 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2667 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
2672 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; in tcp_retransmit_skb()
2679 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2725 struct sk_buff *skb; in tcp_xmit_retransmit_queue() local
2738 skb = tp->retransmit_skb_hint; in tcp_xmit_retransmit_queue()
2739 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2743 skb = tcp_write_queue_head(sk); in tcp_xmit_retransmit_queue()
2747 tcp_for_write_queue_from(skb, sk) { in tcp_xmit_retransmit_queue()
2748 __u8 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
2750 if (skb == tcp_send_head(sk)) in tcp_xmit_retransmit_queue()
2754 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
2768 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) in tcp_xmit_retransmit_queue()
2772 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { in tcp_xmit_retransmit_queue()
2778 skb = hole; in tcp_xmit_retransmit_queue()
2786 hole = skb; in tcp_xmit_retransmit_queue()
2790 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2800 if (tcp_retransmit_skb(sk, skb)) in tcp_xmit_retransmit_queue()
2806 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
2808 if (skb == tcp_write_queue_head(sk)) in tcp_xmit_retransmit_queue()
2836 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); in tcp_send_fin() local
2860 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
2861 if (unlikely(!skb)) { in tcp_send_fin()
2866 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_fin()
2867 sk_forced_wmem_schedule(sk, skb->truesize); in tcp_send_fin()
2869 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
2871 tcp_queue_skb(sk, skb); in tcp_send_fin()
2883 struct sk_buff *skb; in tcp_send_active_reset() local
2886 skb = alloc_skb(MAX_TCP_HEADER, priority); in tcp_send_active_reset()
2887 if (!skb) { in tcp_send_active_reset()
2893 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_active_reset()
2894 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
2896 skb_mstamp_get(&skb->skb_mstamp); in tcp_send_active_reset()
2898 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
2912 struct sk_buff *skb; in tcp_send_synack() local
2914 skb = tcp_write_queue_head(sk); in tcp_send_synack()
2915 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
2919 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
2920 if (skb_cloned(skb)) { in tcp_send_synack()
2921 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); in tcp_send_synack()
2924 tcp_unlink_write_queue(skb, sk); in tcp_send_synack()
2927 sk_wmem_free_skb(sk, skb); in tcp_send_synack()
2930 skb = nskb; in tcp_send_synack()
2933 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
2934 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
2936 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
2956 struct sk_buff *skb; in tcp_make_synack() local
2961 skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC); in tcp_make_synack()
2962 if (unlikely(!skb)) { in tcp_make_synack()
2967 skb_reserve(skb, MAX_TCP_HEADER); in tcp_make_synack()
2969 skb_dst_set(skb, dst); in tcp_make_synack()
2978 skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); in tcp_make_synack()
2981 skb_mstamp_get(&skb->skb_mstamp); in tcp_make_synack()
2987 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, in tcp_make_synack()
2990 skb_push(skb, tcp_header_size); in tcp_make_synack()
2991 skb_reset_transport_header(skb); in tcp_make_synack()
2993 th = tcp_hdr(skb); in tcp_make_synack()
3003 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, in tcp_make_synack()
3006 th->seq = htonl(TCP_SKB_CB(skb)->seq); in tcp_make_synack()
3020 md5, req_to_sk(req), skb); in tcp_make_synack()
3025 skb->tstamp.tv64 = 0; in tcp_make_synack()
3026 return skb; in tcp_make_synack()
3121 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3124 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); in tcp_connect_queue_skb()
3126 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3127 __skb_header_release(skb); in tcp_connect_queue_skb()
3128 __tcp_add_write_queue_tail(sk, skb); in tcp_connect_queue_skb()
3129 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
3130 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3132 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3390 struct sk_buff *skb; in tcp_xmit_probe_skb() local
3393 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_xmit_probe_skb()
3394 if (!skb) in tcp_xmit_probe_skb()
3398 skb_reserve(skb, MAX_TCP_HEADER); in tcp_xmit_probe_skb()
3403 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
3404 skb_mstamp_get(&skb->skb_mstamp); in tcp_xmit_probe_skb()
3405 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); in tcp_xmit_probe_skb()
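
Line 3403 picks the probe's sequence number: a window probe carries snd_una - 1, a byte the peer has already ACKed, so the payload is discarded but the peer must answer with an ACK advertising its current window; "urgent" probes use snd_una itself. The choice in isolation:

#include <stdint.h>
#include <stdio.h>

/* Probe sequence from line 3403: snd_una - 1 for a plain window probe
 * (already-ACKed, guaranteed to elicit an ACK), snd_una when urgent. */
static uint32_t probe_seq(uint32_t snd_una, int urgent)
{
	return snd_una - !urgent;
}

int main(void)
{
	printf("probe seq: %u (snd_una=5000)\n", (unsigned)probe_seq(5000, 0));
	return 0;
}
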
3420 struct sk_buff *skb; in tcp_write_wakeup() local
3425 skb = tcp_send_head(sk); in tcp_write_wakeup()
3426 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3429 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3431 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3432 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3438 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
3439 skb->len > mss) { in tcp_write_wakeup()
3441 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3442 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) in tcp_write_wakeup()
3444 } else if (!tcp_skb_pcount(skb)) in tcp_write_wakeup()
3445 tcp_set_skb_tso_segs(sk, skb, mss); in tcp_write_wakeup()
3447 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3448 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3450 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()