Lines matching refs:skb (cross-reference of every line touching struct sk_buff in the Linux TCP input path, net/ipv4/tcp_input.c; each entry shows the source line number, the matching code, and the enclosing function, with "argument"/"local" noting how skb is declared there)

132 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)  in tcp_measure_rcv_mss()  argument
143 len = skb_shinfo(skb)->gso_size ? : skb->len; in tcp_measure_rcv_mss()
152 len += skb->data - skb_transport_header(skb); in tcp_measure_rcv_mss()
160 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { in tcp_measure_rcv_mss()
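The tcp_measure_rcv_mss() lines above estimate the peer's effective MSS from the segments it actually sends: a GSO aggregate reports the sender's segment size directly via gso_size, otherwise the skb's own payload length is used. A minimal standalone sketch of that heuristic, with simplified types; rcv_mss_update is a hypothetical name and the monotonic-update condition is paraphrased, not copied verbatim from the lines above:

    #include <stdint.h>

    /* Simplified stand-in for the kernel's per-socket ACK state. */
    struct rcv_mss_state {
            uint32_t rcv_mss;       /* current estimate of the peer's MSS */
    };

    static void rcv_mss_update(struct rcv_mss_state *st,
                               uint32_t gso_size, uint32_t payload_len)
    {
            /* Same shape as "len = skb_shinfo(skb)->gso_size ? : skb->len;" */
            uint32_t len = gso_size ? gso_size : payload_len;

            if (len >= st->rcv_mss)
                    st->rcv_mss = len; /* grow on full-sized segments */
    }
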
216 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_accept_cwr() argument
218 if (tcp_hdr(skb)->cwr) in tcp_ecn_accept_cwr()
227 static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in __tcp_ecn_check_ce() argument
229 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { in __tcp_ecn_check_ce()
257 static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_check_ce() argument
260 __tcp_ecn_check_ce(tp, skb); in tcp_ecn_check_ce()
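__tcp_ecn_check_ce() keys off the two ECN bits in the IP DS field carried in TCP_SKB_CB(skb)->ip_dsfield. A hedged sketch of that dispatch; seen_ce is a hypothetical output flag standing in for the kernel's per-connection ECN/CE state:

    #include <stdint.h>
    #include <stdbool.h>

    /* ECN codepoints live in the low two bits of the IP TOS/DS field. */
    #define ECN_MASK    3
    #define ECN_NOT_ECT 0
    #define ECN_ECT_1   1
    #define ECN_ECT_0   2
    #define ECN_CE      3

    static void ecn_check_ce(uint8_t ip_dsfield, bool *seen_ce)
    {
            switch (ip_dsfield & ECN_MASK) {
            case ECN_CE:
                    *seen_ce = true;   /* router marked: congestion experienced */
                    break;
            default:
                    break;             /* Not-ECT / ECT(0) / ECT(1): no signal */
            }
    }
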
342 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
346 int truesize = tcp_win_from_space(skb->truesize) >> 1; in __tcp_grow_window()
350 if (truesize <= skb->len) in __tcp_grow_window()
359 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
372 if (tcp_win_from_space(skb->truesize) <= skb->len) in tcp_grow_window()
375 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
378 incr = max_t(int, incr, 2 * skb->len); in tcp_grow_window()
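tcp_grow_window() only expands the advertised window when the skb's payload justifies its memory footprint. A sketch of that density test, under stated assumptions: the halving inside win_from_space() is illustrative only (the kernel derives the fraction from tcp_adv_win_scale), and skb_is_dense is a hypothetical name:

    #include <stdbool.h>

    /* Approximation of tcp_win_from_space(): how much advertised window
     * a given amount of socket buffer space may back. */
    static int win_from_space(int space)
    {
            return space / 2; /* assumed 50% factor, for illustration */
    }

    /* If the payload (len) covers the window charge of the skb's true
     * memory cost (truesize), the buffer is efficient and the window can
     * grow at full speed; otherwise __tcp_grow_window() above computes a
     * more conservative increment. */
    static bool skb_is_dense(int truesize, int len)
    {
            return win_from_space(truesize) <= len;
    }
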
543 const struct sk_buff *skb) in tcp_rcv_rtt_measure_ts() argument
547 (TCP_SKB_CB(skb)->end_seq - in tcp_rcv_rtt_measure_ts()
548 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
632 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
640 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
672 tcp_ecn_check_ce(tp, skb); in tcp_event_data_recv()
674 if (skb->len >= 128) in tcp_event_data_recv()
675 tcp_grow_window(sk, skb); in tcp_event_data_recv()
888 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
891 before(TCP_SKB_CB(skb)->seq, in tcp_verify_retransmit_hint()
893 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
896 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) in tcp_verify_retransmit_hint()
897 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_verify_retransmit_hint()
900 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost() argument
902 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { in tcp_skb_mark_lost()
903 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost()
905 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost()
906 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_skb_mark_lost()
910 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost_uncond_verify() argument
912 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost_uncond_verify()
914 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { in tcp_skb_mark_lost_uncond_verify()
915 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost_uncond_verify()
916 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_skb_mark_lost_uncond_verify()
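Both loss markers above funnel through the same bookkeeping: charge the segment to lost_out exactly once and tag it TCPCB_LOST, but never touch a segment that SACK has already acknowledged. A minimal sketch with simplified flag constants (mark_lost is a hypothetical name):

    #include <stdint.h>

    #define CB_LOST         0x1   /* stand-in for TCPCB_LOST */
    #define CB_SACKED_ACKED 0x2   /* stand-in for TCPCB_SACKED_ACKED */

    struct scoreboard {
            uint32_t lost_out;    /* packets currently marked lost */
    };

    static void mark_lost(struct scoreboard *sb, uint8_t *sacked,
                          uint32_t pcount)
    {
            if (!(*sacked & (CB_LOST | CB_SACKED_ACKED))) {
                    sb->lost_out += pcount; /* pcount = GSO segment count */
                    *sacked |= CB_LOST;
            }
    }
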
1106 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1114 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && in tcp_match_skb_to_sack()
1115 !before(end_seq, TCP_SKB_CB(skb)->end_seq); in tcp_match_skb_to_sack()
1117 if (tcp_skb_pcount(skb) > 1 && !in_sack && in tcp_match_skb_to_sack()
1118 after(TCP_SKB_CB(skb)->end_seq, start_seq)) { in tcp_match_skb_to_sack()
1119 mss = tcp_skb_mss(skb); in tcp_match_skb_to_sack()
1120 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); in tcp_match_skb_to_sack()
1123 pkt_len = start_seq - TCP_SKB_CB(skb)->seq; in tcp_match_skb_to_sack()
1127 pkt_len = end_seq - TCP_SKB_CB(skb)->seq; in tcp_match_skb_to_sack()
1139 if (new_len >= skb->len) in tcp_match_skb_to_sack()
1144 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
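tcp_match_skb_to_sack() decides whether an skb lies inside a SACK block using wrap-safe sequence comparisons, fragmenting the skb when only part of it is covered. A self-contained sketch of the containment test above (fully_sacked is a hypothetical name):

    #include <stdint.h>
    #include <stdbool.h>

    /* Wrap-safe sequence comparisons, as in the kernel's before()/after(). */
    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

    /* The skb [seq, end_seq) is fully inside the SACK block
     * [sack_start, sack_end) iff it neither starts before the block
     * nor ends after it. */
    static bool fully_sacked(uint32_t seq, uint32_t end_seq,
                             uint32_t sack_start, uint32_t sack_end)
    {
            return !seq_after(sack_start, seq) && !seq_before(sack_end, end_seq);
    }
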
1240 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1246 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1247 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ in tcp_shifted_skb()
1258 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1260 &skb->skb_mstamp); in tcp_shifted_skb()
1262 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1266 TCP_SKB_CB(skb)->seq += shifted; in tcp_shifted_skb()
1269 BUG_ON(tcp_skb_pcount(skb) < pcount); in tcp_shifted_skb()
1270 tcp_skb_pcount_add(skb, -pcount); in tcp_shifted_skb()
1281 if (tcp_skb_pcount(skb) <= 1) in tcp_shifted_skb()
1282 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_shifted_skb()
1285 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); in tcp_shifted_skb()
1287 if (skb->len > 0) { in tcp_shifted_skb()
1288 BUG_ON(!tcp_skb_pcount(skb)); in tcp_shifted_skb()
1295 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1297 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1302 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_shifted_skb()
1303 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_shifted_skb()
1306 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1307 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1309 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1310 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1320 static int tcp_skb_seglen(const struct sk_buff *skb) in tcp_skb_seglen() argument
1322 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); in tcp_skb_seglen()
1326 static int skb_can_shift(const struct sk_buff *skb) in skb_can_shift() argument
1328 return !skb_headlen(skb) && skb_is_nonlinear(skb); in skb_can_shift()
1334 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1351 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) in tcp_shift_skb_data()
1353 if (!skb_can_shift(skb)) in tcp_shift_skb_data()
1356 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1360 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1362 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1367 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && in tcp_shift_skb_data()
1368 !before(end_seq, TCP_SKB_CB(skb)->end_seq); in tcp_shift_skb_data()
1371 len = skb->len; in tcp_shift_skb_data()
1372 pcount = tcp_skb_pcount(skb); in tcp_shift_skb_data()
1373 mss = tcp_skb_seglen(skb); in tcp_shift_skb_data()
1381 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) in tcp_shift_skb_data()
1387 if (tcp_skb_pcount(skb) <= 1) in tcp_shift_skb_data()
1390 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); in tcp_shift_skb_data()
1406 len = end_seq - TCP_SKB_CB(skb)->seq; in tcp_shift_skb_data()
1408 BUG_ON(len > skb->len); in tcp_shift_skb_data()
1414 mss = tcp_skb_mss(skb); in tcp_shift_skb_data()
1433 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1436 if (!skb_shift(prev, skb, len)) in tcp_shift_skb_data()
1438 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1446 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1448 if (!skb_can_shift(skb) || in tcp_shift_skb_data()
1449 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1450 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || in tcp_shift_skb_data()
1451 (mss != tcp_skb_seglen(skb))) in tcp_shift_skb_data()
1454 len = skb->len; in tcp_shift_skb_data()
1455 if (skb_shift(prev, skb, len)) { in tcp_shift_skb_data()
1456 pcount += tcp_skb_pcount(skb); in tcp_shift_skb_data()
1457 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); in tcp_shift_skb_data()
1465 return skb; in tcp_shift_skb_data()
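The tail of tcp_shift_skb_data() above tries to fold the following skb into the same SACKed aggregate, but only under strict conditions. A sketch of that loop guard; the struct and can_fold_next are hypothetical stand-ins for the skb fields and helpers named in the listing:

    #include <stdint.h>
    #include <stdbool.h>

    #define CB_TAGBITS      0x7   /* stand-in for TCPCB_TAGBITS */
    #define CB_SACKED_ACKED 0x2   /* stand-in for TCPCB_SACKED_ACKED */

    struct next_skb {
            bool can_shift;       /* stand-in for skb_can_shift() */
            bool is_send_head;    /* stand-in for skb == tcp_send_head(sk) */
            uint8_t sacked;       /* scoreboard flag word */
            int seglen;           /* stand-in for tcp_skb_seglen() */
    };

    /* Fold the next skb only if it is shiftable, not the send head,
     * purely SACKed (no other tag bits), and carries the same
     * per-segment size as the run being built. */
    static bool can_fold_next(const struct next_skb *n, int run_mss)
    {
            if (!n->can_shift || n->is_send_head)
                    return false;
            if ((n->sacked & CB_TAGBITS) != CB_SACKED_ACKED)
                    return false;
            return n->seglen == run_mss;
    }
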
1472 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1481 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1485 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1489 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) in tcp_sacktag_walk()
1493 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in tcp_sacktag_walk()
1494 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1506 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1509 if (tmp != skb) { in tcp_sacktag_walk()
1510 skb = tmp; in tcp_sacktag_walk()
1516 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1526 TCP_SKB_CB(skb)->sacked = in tcp_sacktag_walk()
1529 TCP_SKB_CB(skb)->sacked, in tcp_sacktag_walk()
1530 TCP_SKB_CB(skb)->seq, in tcp_sacktag_walk()
1531 TCP_SKB_CB(skb)->end_seq, in tcp_sacktag_walk()
1533 tcp_skb_pcount(skb), in tcp_sacktag_walk()
1534 &skb->skb_mstamp); in tcp_sacktag_walk()
1536 if (!before(TCP_SKB_CB(skb)->seq, in tcp_sacktag_walk()
1538 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1541 state->fack_count += tcp_skb_pcount(skb); in tcp_sacktag_walk()
1543 return skb; in tcp_sacktag_walk()
1549 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1553 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1554 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1557 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) in tcp_sacktag_skip()
1560 state->fack_count += tcp_skb_pcount(skb); in tcp_sacktag_skip()
1562 return skb; in tcp_sacktag_skip()
1565 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, in tcp_maybe_skipping_dsack() argument
1572 return skb; in tcp_maybe_skipping_dsack()
1575 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1576 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1581 return skb; in tcp_maybe_skipping_dsack()
1599 struct sk_buff *skb; in tcp_sacktag_write_queue() local
1682 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1717 skb = tcp_sacktag_skip(skb, sk, state, in tcp_sacktag_write_queue()
1719 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1730 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1737 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1738 if (!skb) in tcp_sacktag_write_queue()
1745 skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); in tcp_sacktag_write_queue()
1752 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1753 if (!skb) in tcp_sacktag_write_queue()
1757 skb = tcp_sacktag_skip(skb, sk, state, start_seq); in tcp_sacktag_write_queue()
1760 skb = tcp_sacktag_walk(skb, sk, next_dup, state, in tcp_sacktag_write_queue()
1876 struct sk_buff *skb; in tcp_enter_loss() local
1899 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
1900 is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); in tcp_enter_loss()
1908 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
1909 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
1912 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; in tcp_enter_loss()
1913 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { in tcp_enter_loss()
1914 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; in tcp_enter_loss()
1915 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_enter_loss()
1916 tp->lost_out += tcp_skb_pcount(skb); in tcp_enter_loss()
1917 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_enter_loss()
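The tcp_enter_loss() walk above resets every queued segment's scoreboard: each skb keeps at most its SACKED_ACKED tag, and anything not SACKed (or everything, if the receiver reneged on its SACKs) is marked LOST. A minimal per-skb sketch with simplified state (enter_loss_one is a hypothetical name):

    #include <stdint.h>
    #include <stdbool.h>

    #define CB_LOST         0x1
    #define CB_SACKED_ACKED 0x2
    #define CB_TAGBITS      0x7

    struct loss_state {
            uint32_t lost_out;
            uint32_t retransmit_high;
    };

    static void enter_loss_one(struct loss_state *st, uint8_t *sacked,
                               uint32_t pcount, uint32_t end_seq, bool is_reneg)
    {
            /* Clear all tags except SACKED_ACKED, as on line 1912 above. */
            *sacked &= (uint8_t)~CB_TAGBITS | CB_SACKED_ACKED;
            if (!(*sacked & CB_SACKED_ACKED) || is_reneg) {
                    *sacked &= ~CB_SACKED_ACKED;
                    *sacked |= CB_LOST;
                    st->lost_out += pcount;
                    st->retransmit_high = end_seq;
            }
    }
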
2166 struct sk_buff *skb; in tcp_mark_head_lost() local
2175 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2178 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2181 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2185 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2186 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2190 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2193 if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) in tcp_mark_head_lost()
2198 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_mark_head_lost()
2199 cnt += tcp_skb_pcount(skb); in tcp_mark_head_lost()
2203 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || in tcp_mark_head_lost()
2207 mss = tcp_skb_mss(skb); in tcp_mark_head_lost()
2208 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, in tcp_mark_head_lost()
2215 tcp_skb_mark_lost(tp, skb); in tcp_mark_head_lost()
2265 const struct sk_buff *skb) in tcp_skb_spurious_retrans() argument
2267 return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) && in tcp_skb_spurious_retrans()
2268 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); in tcp_skb_spurious_retrans()
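tcp_skb_spurious_retrans() uses the timestamp option to detect a needless retransmission: if the peer's echoed timestamp (ts_ecr) is older than the retransmission's own send timestamp, the ACK must have been triggered by the original copy. A hedged sketch of that comparison (spurious_retrans is a hypothetical name, and the ts_ecr != 0 validity check is an assumption drawn from the kernel helper, not from the lines above):

    #include <stdint.h>
    #include <stdbool.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    static bool spurious_retrans(bool ever_retrans, uint32_t skb_tstamp,
                                 uint32_t rcv_tsecr)
    {
            /* ever_retrans mirrors TCPCB_RETRANS; a zero ts_ecr means the
             * peer echoed no timestamp, so nothing can be concluded. */
            return ever_retrans && rcv_tsecr &&
                   seq_before(rcv_tsecr, skb_tstamp);
    }
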
2299 struct sk_buff *skb; in tcp_any_retrans_done() local
2304 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2305 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) in tcp_any_retrans_done()
2346 struct sk_buff *skb; in tcp_undo_cwnd_reduction() local
2348 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2349 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2351 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; in tcp_undo_cwnd_reduction()
2598 struct sk_buff *skb; in tcp_simple_retransmit() local
2602 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2603 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2605 if (tcp_skb_seglen(skb) > mss && in tcp_simple_retransmit()
2606 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { in tcp_simple_retransmit()
2607 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_simple_retransmit()
2608 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; in tcp_simple_retransmit()
2609 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_simple_retransmit()
2611 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_simple_retransmit()
3024 struct sk_buff *skb = tcp_write_queue_head(sk); in tcp_rearm_rto() local
3026 tcp_skb_timestamp(skb) + rto; in tcp_rearm_rto()
3058 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3063 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3065 packets_acked = tcp_skb_pcount(skb); in tcp_tso_acked()
3066 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3068 packets_acked -= tcp_skb_pcount(skb); in tcp_tso_acked()
3071 BUG_ON(tcp_skb_pcount(skb) == 0); in tcp_tso_acked()
3072 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); in tcp_tso_acked()
3078 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3087 shinfo = skb_shinfo(skb); in tcp_ack_tstamp()
3090 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3110 struct sk_buff *skb; in tcp_clean_rtx_queue() local
3117 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3118 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); in tcp_clean_rtx_queue()
3122 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3126 if (tcp_skb_pcount(skb) == 1 || in tcp_clean_rtx_queue()
3130 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3137 prefetchw(skb->next); in tcp_clean_rtx_queue()
3138 acked_pcount = tcp_skb_pcount(skb); in tcp_clean_rtx_queue()
3146 last_ackt = skb->skb_mstamp; in tcp_clean_rtx_queue()
3158 else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3159 tcp_rack_advance(tp, &skb->skb_mstamp, sacked); in tcp_clean_rtx_queue()
3183 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3184 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3185 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3187 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3194 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_clean_rtx_queue()
3233 } else if (skb && rtt_update && sack_rtt_us >= 0 && in tcp_clean_rtx_queue()
3234 sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) { in tcp_clean_rtx_queue()
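tcp_clean_rtx_queue() above pops fully acknowledged skbs off the head of the write queue until one extends past snd_una. A minimal sketch of that loop shape; the list node and clean_rtx_queue are hypothetical stand-ins, and where this sketch simply breaks on a partial ACK, the kernel instead trims the head via tcp_tso_acked():

    #include <stdint.h>
    #include <stddef.h>

    /* Minimal queue node standing in for struct sk_buff. */
    struct wq_skb {
            struct wq_skb *next;
            uint32_t end_seq;
            uint32_t pcount;
    };

    static int seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    /* Returns the number of packets newly acknowledged by snd_una. */
    static uint32_t clean_rtx_queue(struct wq_skb **head,
                                    struct wq_skb *send_head, uint32_t snd_una)
    {
            uint32_t acked = 0;
            struct wq_skb *skb;

            while ((skb = *head) && skb != send_head) {
                    if (seq_after(skb->end_seq, snd_una))
                            break;          /* at most partially ACKed: stays */
                    acked += skb->pcount;
                    *head = skb->next;      /* cf. tcp_unlink_write_queue() */
                    /* the kernel also frees the skb and fixes hints here */
            }
            return acked;
    }
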
3355 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3360 u32 nwin = ntohs(tcp_hdr(skb)->window); in tcp_ack_update_window()
3362 if (likely(!tcp_hdr(skb)->syn)) in tcp_ack_update_window()
3400 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, in tcp_oow_rate_limited() argument
3404 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && in tcp_oow_rate_limited()
3405 !tcp_hdr(skb)->syn) in tcp_oow_rate_limited()
3424 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) in tcp_send_challenge_ack() argument
3433 if (tcp_oow_rate_limited(sock_net(sk), skb, in tcp_send_challenge_ack()
3512 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3518 u32 ack_seq = TCP_SKB_CB(skb)->seq; in tcp_ack()
3519 u32 ack = TCP_SKB_CB(skb)->ack_seq; in tcp_ack()
3537 tcp_send_challenge_ack(sk, skb); in tcp_ack()
3564 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3581 if (ack_seq != TCP_SKB_CB(skb)->end_seq) in tcp_ack()
3586 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3588 if (TCP_SKB_CB(skb)->sacked) in tcp_ack()
3589 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3592 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3665 if (TCP_SKB_CB(skb)->sacked) { in tcp_ack()
3666 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3697 void tcp_parse_options(const struct sk_buff *skb, in tcp_parse_options() argument
3702 const struct tcphdr *th = tcp_hdr(skb); in tcp_parse_options()
3771 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; in tcp_parse_options()
3830 static bool tcp_fast_parse_options(const struct sk_buff *skb, in tcp_fast_parse_options() argument
3845 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
3913 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3916 const struct tcphdr *th = tcp_hdr(skb); in tcp_disordered_ack()
3917 u32 seq = TCP_SKB_CB(skb)->seq; in tcp_disordered_ack()
3918 u32 ack = TCP_SKB_CB(skb)->ack_seq; in tcp_disordered_ack()
3921 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
3934 const struct sk_buff *skb) in tcp_paws_discard() argument
3939 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
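tcp_paws_discard() applies the PAWS rule (RFC 7323): a segment whose timestamp is older than the last validated one is stale, unless tcp_disordered_ack() above whitelists it as a pure in-window ACK that merely arrived out of order. A simplified sketch that ignores the small tolerance window and the timestamp-aging rule the kernel also applies (paws_reject is a hypothetical name):

    #include <stdint.h>
    #include <stdbool.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    /* ts_recent is the last peer timestamp this side accepted. */
    static bool paws_reject(uint32_t seg_tsval, uint32_t ts_recent,
                            bool is_disordered_ack)
    {
            return seq_before(seg_tsval, ts_recent) && !is_disordered_ack;
    }
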
4112 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
4116 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_send_dupack()
4117 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4122 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_send_dupack()
4124 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4126 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4282 struct sk_buff *skb, *tail; in tcp_ofo_queue() local
4285 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { in tcp_ofo_queue()
4286 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4289 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { in tcp_ofo_queue()
4291 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) in tcp_ofo_queue()
4292 dsack_high = TCP_SKB_CB(skb)->end_seq; in tcp_ofo_queue()
4293 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4296 __skb_unlink(skb, &tp->out_of_order_queue); in tcp_ofo_queue()
4297 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_ofo_queue()
4299 __kfree_skb(skb); in tcp_ofo_queue()
4303 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_ofo_queue()
4304 TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4307 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); in tcp_ofo_queue()
4308 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4310 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4311 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_ofo_queue()
4314 kfree_skb_partial(skb, fragstolen); in tcp_ofo_queue()
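tcp_ofo_queue() drains the out-of-order queue once rcv_nxt advances: segments that now begin at or before rcv_nxt become deliverable, while segments entirely below it are duplicates. A sketch of that drain under simplified types (drain_ofo is a hypothetical name; DSACK generation, coalescing, and freeing are left as comments):

    #include <stdint.h>
    #include <stddef.h>

    struct ofo_skb {
            struct ofo_skb *next;
            uint32_t seq, end_seq;
    };

    static int seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    static void drain_ofo(struct ofo_skb **q, uint32_t *rcv_nxt)
    {
            struct ofo_skb *skb;

            while ((skb = *q) != NULL) {
                    if (seq_after(skb->seq, *rcv_nxt))
                            break;          /* a gap still precedes this one */
                    *q = skb->next;
                    if (!seq_after(skb->end_seq, *rcv_nxt)) {
                            /* fully duplicate: the kernel DSACKs and frees it */
                            continue;
                    }
                    *rcv_nxt = skb->end_seq; /* deliver to the receive queue */
            }
    }
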
4321 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4325 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4330 if (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4334 if (!sk_rmem_schedule(sk, skb, size)) in tcp_try_rmem_schedule()
4341 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4347 tcp_ecn_check_ce(tp, skb); in tcp_data_queue_ofo()
4349 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4351 __kfree_skb(skb); in tcp_data_queue_ofo()
4361 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue_ofo()
4368 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; in tcp_data_queue_ofo()
4370 TCP_SKB_CB(skb)->end_seq; in tcp_data_queue_ofo()
4372 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4376 seq = TCP_SKB_CB(skb)->seq; in tcp_data_queue_ofo()
4377 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_data_queue_ofo()
4382 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { in tcp_data_queue_ofo()
4383 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4385 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4386 kfree_skb_partial(skb, fragstolen); in tcp_data_queue_ofo()
4387 skb = NULL; in tcp_data_queue_ofo()
4415 __kfree_skb(skb); in tcp_data_queue_ofo()
4416 skb = NULL; in tcp_data_queue_ofo()
4435 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4437 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4440 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { in tcp_data_queue_ofo()
4441 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4461 if (skb) { in tcp_data_queue_ofo()
4462 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4463 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4467 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4473 __skb_pull(skb, hdrlen); in tcp_queue_rcv()
4475 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; in tcp_queue_rcv()
4476 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); in tcp_queue_rcv()
4478 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4479 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4486 struct sk_buff *skb; in tcp_send_rcvq() local
4500 skb = alloc_skb_with_frags(size - data_len, data_len, in tcp_send_rcvq()
4503 if (!skb) in tcp_send_rcvq()
4506 skb_put(skb, size - data_len); in tcp_send_rcvq()
4507 skb->data_len = data_len; in tcp_send_rcvq()
4508 skb->len = size; in tcp_send_rcvq()
4510 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4513 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); in tcp_send_rcvq()
4517 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4518 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; in tcp_send_rcvq()
4519 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4521 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4523 __kfree_skb(skb); in tcp_send_rcvq()
4528 kfree_skb(skb); in tcp_send_rcvq()
4534 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4540 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) in tcp_data_queue()
4543 skb_dst_drop(skb); in tcp_data_queue()
4544 __skb_pull(skb, tcp_hdr(skb)->doff * 4); in tcp_data_queue()
4546 tcp_ecn_accept_cwr(tp, skb); in tcp_data_queue()
4554 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
4562 int chunk = min_t(unsigned int, skb->len, in tcp_data_queue()
4568 if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { in tcp_data_queue()
4571 eaten = (chunk == skb->len); in tcp_data_queue()
4581 sk_forced_mem_schedule(sk, skb->truesize); in tcp_data_queue()
4582 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4585 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4587 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4588 if (skb->len) in tcp_data_queue()
4589 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4590 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_data_queue()
4609 kfree_skb_partial(skb, fragstolen); in tcp_data_queue()
4615 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
4618 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4624 __kfree_skb(skb); in tcp_data_queue()
4629 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
4634 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
4637 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_data_queue()
4638 TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4640 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4650 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
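tcp_data_queue() above is essentially a classifier: in-sequence data is delivered, fully old data is dropped (with a DSACK), data beyond the window is discarded, data overlapping rcv_nxt is partially old, and everything else goes to the out-of-order queue. A sketch of that decision using the same wrap-safe comparisons (the enum and classify_segment are hypothetical names):

    #include <stdint.h>
    #include <stdbool.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

    enum rx_disposition {
            RX_DELIVER,        /* seq == rcv_nxt: in order */
            RX_DUP_DROP,       /* entirely old: DSACK and drop */
            RX_OUT_OF_WINDOW,  /* starts beyond the receive window */
            RX_PARTIAL,        /* overlaps data already received */
            RX_OFO_QUEUE,      /* future data: queue out of order */
    };

    static enum rx_disposition classify_segment(uint32_t seq, uint32_t end_seq,
                                                uint32_t rcv_nxt, uint32_t rcv_wnd)
    {
            if (seq == rcv_nxt)
                    return RX_DELIVER;
            if (!seq_after(end_seq, rcv_nxt))
                    return RX_DUP_DROP;
            if (!seq_before(seq, rcv_nxt + rcv_wnd))
                    return RX_OUT_OF_WINDOW;
            if (seq_before(seq, rcv_nxt))
                    return RX_PARTIAL;
            return RX_OFO_QUEUE;
    }
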
4653 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4658 if (!skb_queue_is_last(list, skb)) in tcp_collapse_one()
4659 next = skb_queue_next(list, skb); in tcp_collapse_one()
4661 __skb_unlink(skb, list); in tcp_collapse_one()
4662 __kfree_skb(skb); in tcp_collapse_one()
4681 struct sk_buff *skb, *n; in tcp_collapse() local
4686 skb = head; in tcp_collapse()
4689 skb_queue_walk_from_safe(list, skb, n) { in tcp_collapse()
4690 if (skb == tail) in tcp_collapse()
4693 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { in tcp_collapse()
4694 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4695 if (!skb) in tcp_collapse()
4705 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && in tcp_collapse()
4706 (tcp_win_from_space(skb->truesize) > skb->len || in tcp_collapse()
4707 before(TCP_SKB_CB(skb)->seq, start))) { in tcp_collapse()
4712 if (!skb_queue_is_last(list, skb)) { in tcp_collapse()
4713 struct sk_buff *next = skb_queue_next(list, skb); in tcp_collapse()
4715 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { in tcp_collapse()
4722 start = TCP_SKB_CB(skb)->end_seq; in tcp_collapse()
4725 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) in tcp_collapse()
4736 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); in tcp_collapse()
4738 __skb_queue_before(list, skb, nskb); in tcp_collapse()
4743 int offset = start - TCP_SKB_CB(skb)->seq; in tcp_collapse()
4744 int size = TCP_SKB_CB(skb)->end_seq - start; in tcp_collapse()
4749 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) in tcp_collapse()
4755 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { in tcp_collapse()
4756 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4757 if (!skb || in tcp_collapse()
4758 skb == tail || in tcp_collapse()
4759 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) in tcp_collapse()
4772 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); in tcp_collapse_ofo_queue() local
4776 if (!skb) in tcp_collapse_ofo_queue()
4779 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4780 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
4781 head = skb; in tcp_collapse_ofo_queue()
4786 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) in tcp_collapse_ofo_queue()
4787 next = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_collapse_ofo_queue()
4788 skb = next; in tcp_collapse_ofo_queue()
4792 if (!skb || in tcp_collapse_ofo_queue()
4793 after(TCP_SKB_CB(skb)->seq, end) || in tcp_collapse_ofo_queue()
4794 before(TCP_SKB_CB(skb)->end_seq, start)) { in tcp_collapse_ofo_queue()
4796 head, skb, start, end); in tcp_collapse_ofo_queue()
4797 head = skb; in tcp_collapse_ofo_queue()
4798 if (!skb) in tcp_collapse_ofo_queue()
4801 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4802 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
4804 if (before(TCP_SKB_CB(skb)->seq, start)) in tcp_collapse_ofo_queue()
4805 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4806 if (after(TCP_SKB_CB(skb)->end_seq, end)) in tcp_collapse_ofo_queue()
4807 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
5043 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg() local
5045 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5046 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
5047 __kfree_skb(skb); in tcp_check_urg()
5059 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
5073 if (ptr < skb->len) { in tcp_urg()
5075 if (skb_copy_bits(skb, ptr, &tmp, 1)) in tcp_urg()
5084 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) in tcp_copy_to_iovec() argument
5087 int chunk = skb->len - hlen; in tcp_copy_to_iovec()
5091 if (skb_csum_unnecessary(skb)) in tcp_copy_to_iovec()
5092 err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); in tcp_copy_to_iovec()
5094 err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); in tcp_copy_to_iovec()
5107 struct sk_buff *skb) in __tcp_checksum_complete_user() argument
5113 result = __tcp_checksum_complete(skb); in __tcp_checksum_complete_user()
5116 result = __tcp_checksum_complete(skb); in __tcp_checksum_complete_user()
5122 struct sk_buff *skb) in tcp_checksum_complete_user() argument
5124 return !skb_csum_unnecessary(skb) && in tcp_checksum_complete_user()
5125 __tcp_checksum_complete_user(sk, skb); in tcp_checksum_complete_user()
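tcp_checksum_complete_user() short-circuits when the NIC already verified the checksum (CHECKSUM_UNNECESSARY), only falling back to the software fold otherwise. A tiny sketch of that shape, with the expensive path abstracted behind a callback (checksum_fails and full_check are hypothetical stand-ins):

    #include <stdbool.h>

    static bool checksum_fails(bool csum_unnecessary,
                               bool (*full_check)(void *ctx), void *ctx)
    {
            /* Trust hardware validation; otherwise run the full
             * complement check, cf. __tcp_checksum_complete_user(). */
            return !csum_unnecessary && full_check(ctx);
    }
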
5131 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5137 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5138 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5141 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5144 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5151 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5161 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5164 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5177 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) in tcp_validate_incoming()
5180 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5194 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5201 __kfree_skb(skb); in tcp_validate_incoming()
5228 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5234 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5262 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5263 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5303 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5304 __kfree_skb(skb); in tcp_rcv_established()
5321 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { in tcp_rcv_established()
5332 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5334 __skb_pull(skb, tcp_header_len); in tcp_rcv_established()
5335 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_rcv_established()
5341 if (tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5344 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5356 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5361 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5365 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5367 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5369 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5378 kfree_skb_partial(skb, fragstolen); in tcp_rcv_established()
5385 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5395 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5399 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5402 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5405 tcp_urg(sk, skb, th); in tcp_rcv_established()
5408 tcp_data_queue(sk, skb); in tcp_rcv_established()
5419 __kfree_skb(skb); in tcp_rcv_established()
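The fast path in tcp_rcv_established() above is gated on the segment being exactly in sequence and not acknowledging data that was never sent; the header-prediction flag match is a separate check omitted here. A sketch of that gate (fast_path_ok is a hypothetical name):

    #include <stdint.h>
    #include <stdbool.h>

    static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    static bool fast_path_ok(uint32_t seq, uint32_t ack_seq,
                             uint32_t rcv_nxt, uint32_t snd_nxt)
    {
            return seq == rcv_nxt && !seq_after(ack_seq, snd_nxt);
    }
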
5423 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5430 if (skb) { in tcp_finish_connect()
5431 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5432 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5517 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5525 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
5538 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
5539 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) in tcp_rcv_synsent_state_process()
5581 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
5582 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5587 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5588 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5624 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5627 tcp_rcv_fastopen_synack(sk, skb, &foc)) in tcp_rcv_synsent_state_process()
5647 __kfree_skb(skb); in tcp_rcv_synsent_state_process()
5688 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5690 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5696 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
5745 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) in tcp_rcv_state_process() argument
5749 const struct tcphdr *th = tcp_hdr(skb); in tcp_rcv_state_process()
5770 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5790 kfree_skb(skb); in tcp_rcv_state_process()
5796 queued = tcp_rcv_synsent_state_process(sk, skb, th); in tcp_rcv_state_process()
5801 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5802 __kfree_skb(skb); in tcp_rcv_state_process()
5812 if (!tcp_check_req(sk, skb, req, true)) in tcp_rcv_state_process()
5819 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5823 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5860 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
5862 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
5928 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_rcv_state_process()
5929 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { in tcp_rcv_state_process()
5970 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5977 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_rcv_state_process()
5986 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_rcv_state_process()
5987 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
5995 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
6008 __kfree_skb(skb); in tcp_rcv_state_process()
6041 const struct sk_buff *skb, in tcp_ecn_create_request() argument
6045 const struct tcphdr *th = tcp_hdr(skb); in tcp_ecn_create_request()
6054 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); in tcp_ecn_create_request()
6065 struct sk_buff *skb, const struct sock *sk) in tcp_openreq_init() argument
6071 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; in tcp_openreq_init()
6072 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_openreq_init()
6083 ireq->ir_rmt_port = tcp_hdr(skb)->source; in tcp_openreq_init()
6084 ireq->ir_num = ntohs(tcp_hdr(skb)->dest); in tcp_openreq_init()
6085 ireq->ir_mark = inet_request_mark(sk, skb); in tcp_openreq_init()
6114 const struct sk_buff *skb, in tcp_syn_flood_action() argument
6134 proto, ntohs(tcp_hdr(skb)->dest), msg); in tcp_syn_flood_action()
6141 const struct sk_buff *skb) in tcp_reqsk_record_syn() argument
6144 u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb); in tcp_reqsk_record_syn()
6150 memcpy(&copy[1], skb_network_header(skb), len); in tcp_reqsk_record_syn()
6158 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
6161 __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; in tcp_conn_request()
6176 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
6201 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); in tcp_conn_request()
6207 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
6212 af_ops->init_req(req, sk, skb); in tcp_conn_request()
6214 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
6252 pr_drop_req(req, ntohs(tcp_hdr(skb)->source), in tcp_conn_request()
6257 isn = af_ops->init_seq(skb); in tcp_conn_request()
6265 tcp_ecn_create_request(req, skb, sk, dst); in tcp_conn_request()
6268 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6278 tcp_reqsk_record_syn(sk, req, skb); in tcp_conn_request()
6279 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); in tcp_conn_request()
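tcp_conn_request() above picks the initial sequence number three ways: under SYN-flood pressure it uses a stateless SYN cookie derived from the connection tuple, a recycled TIME-WAIT slot supplies tcp_tw_isn, and otherwise the address family's normal generator runs. A hedged sketch of that choice; both generator callbacks and choose_isn are hypothetical stand-ins for cookie_init_sequence() and af_ops->init_seq():

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t choose_isn(bool want_cookie, uint32_t tw_isn,
                               uint32_t (*init_seq)(void),
                               uint32_t (*cookie_isn)(void))
    {
            if (want_cookie)
                    return cookie_isn(); /* stateless: encodes tuple + MSS */
            if (tw_isn)
                    return tw_isn;       /* reuse TIME-WAIT derived ISN */
            return init_seq();           /* normal per-connection generator */
    }
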