Lines matching refs:skb (identifier cross-reference hits; every function below lives in net/ipv4/tcp_input.c, the kernel's TCP receive path). Each entry reads: <source line> <code fragment> in <enclosing function>, with "argument"/"local" marking how skb is bound there.
130 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) in tcp_measure_rcv_mss() argument
141 len = skb_shinfo(skb)->gso_size ? : skb->len; in tcp_measure_rcv_mss()
150 len += skb->data - skb_transport_header(skb); in tcp_measure_rcv_mss()
158 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { in tcp_measure_rcv_mss()
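The tcp_measure_rcv_mss() hits above show the core heuristic: take gso_size when the NIC reported one, otherwise skb->len, and let a full-sized segment raise the receive-MSS estimate. A minimal userspace sketch of that logic, with an illustrative stand-in struct rather than the kernel's types:

    /* Sketch of the rcv_mss heuristic above; fake_seg is a stand-in. */
    #include <stdio.h>

    struct fake_seg {
        unsigned int gso_size;   /* 0 when the segment was not GSO'd */
        unsigned int len;        /* payload bytes in this segment    */
    };

    static unsigned int measure_rcv_mss(unsigned int rcv_mss,
                                        const struct fake_seg *seg)
    {
        /* mirrors "len = skb_shinfo(skb)->gso_size ? : skb->len" */
        unsigned int len = seg->gso_size ? seg->gso_size : seg->len;

        if (len >= rcv_mss)
            rcv_mss = len;   /* a full-sized segment raises the estimate */
        return rcv_mss;
    }

    int main(void)
    {
        struct fake_seg a = { .gso_size = 0,    .len = 536  };
        struct fake_seg b = { .gso_size = 1448, .len = 8688 };
        unsigned int mss = 536;

        mss = measure_rcv_mss(mss, &a);
        mss = measure_rcv_mss(mss, &b);
        printf("estimated rcv_mss = %u\n", mss);  /* 1448 */
        return 0;
    }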
212 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_accept_cwr() argument
214 if (tcp_hdr(skb)->cwr) in tcp_ecn_accept_cwr()
223 static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in __tcp_ecn_check_ce() argument
225 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { in __tcp_ecn_check_ce()
253 static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_check_ce() argument
256 __tcp_ecn_check_ce(tp, skb); in tcp_ecn_check_ce()
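__tcp_ecn_check_ce() switches on the two ECN bits of the IP dsfield saved in TCP_SKB_CB(skb)->ip_dsfield. A standalone sketch of just that classification; the constants match include/net/inet_ecn.h, while the per-case handling is reduced to a printout:

    #include <stdio.h>

    enum {
        INET_ECN_NOT_ECT = 0,
        INET_ECN_ECT_1   = 1,
        INET_ECN_ECT_0   = 2,
        INET_ECN_CE      = 3,
        INET_ECN_MASK    = 3,
    };

    static void check_ce(unsigned char ip_dsfield)
    {
        switch (ip_dsfield & INET_ECN_MASK) {
        case INET_ECN_CE:
            puts("CE: congestion experienced, echo ECE to the sender");
            break;
        case INET_ECN_NOT_ECT:
            puts("not ECN-capable transport");
            break;
        default:
            puts("ECT(0)/ECT(1): ECN-capable, no congestion mark");
            break;
        }
    }

    int main(void)
    {
        check_ce(0x03);  /* CE      */
        check_ce(0x02);  /* ECT(0)  */
        check_ce(0x00);  /* not-ECT */
        return 0;
    }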
338 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
342 int truesize = tcp_win_from_space(skb->truesize) >> 1; in __tcp_grow_window()
346 if (truesize <= skb->len) in __tcp_grow_window()
355 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
368 if (tcp_win_from_space(skb->truesize) <= skb->len) in tcp_grow_window()
371 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
374 incr = max_t(int, incr, 2 * skb->len); in tcp_grow_window()
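Both __tcp_grow_window() and tcp_grow_window() above apply the same test: the receive window may grow only if the skb carries at least as much payload as the window space its truesize costs. A sketch of that test, assuming the historical default tcp_adv_win_scale of 1 (half the buffer space counts as window); the real tcp_win_from_space() is tunable:

    #include <stdbool.h>
    #include <stdio.h>

    static int win_from_space(int space)
    {
        return space - (space >> 1);   /* space/2 for adv_win_scale == 1 */
    }

    static bool skb_pays_its_way(int truesize, int len)
    {
        /* "tcp_win_from_space(skb->truesize) <= skb->len": the payload
         * covers the window space its memory consumes. */
        return win_from_space(truesize) <= len;
    }

    int main(void)
    {
        printf("%d\n", skb_pays_its_way(2048, 1448)); /* 1: may grow    */
        printf("%d\n", skb_pays_its_way(2048, 60));   /* 0: too wasteful */
        return 0;
    }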
539 const struct sk_buff *skb) in tcp_rcv_rtt_measure_ts() argument
543 (TCP_SKB_CB(skb)->end_seq - in tcp_rcv_rtt_measure_ts()
544 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
628 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
636 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
668 tcp_ecn_check_ce(tp, skb); in tcp_event_data_recv()
670 if (skb->len >= 128) in tcp_event_data_recv()
671 tcp_grow_window(sk, skb); in tcp_event_data_recv()
867 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
870 before(TCP_SKB_CB(skb)->seq, in tcp_verify_retransmit_hint()
872 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
875 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) in tcp_verify_retransmit_hint()
876 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_verify_retransmit_hint()
879 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost() argument
881 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { in tcp_skb_mark_lost()
882 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost()
884 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost()
885 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_skb_mark_lost()
890 struct sk_buff *skb) in tcp_skb_mark_lost_uncond_verify() argument
892 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost_uncond_verify()
894 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { in tcp_skb_mark_lost_uncond_verify()
895 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost_uncond_verify()
896 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_skb_mark_lost_uncond_verify()
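Nearly every hit in this listing funnels through before()/after() on 32-bit sequence numbers; they are the wraparound-safe signed comparisons defined in include/net/tcp.h, reproduced here verbatim in userspace form:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;   /* safe across 2^32 wrap */
    }
    #define after(seq2, seq1) before(seq1, seq2)

    int main(void)
    {
        /* 0x00000005 is "after" 0xfffffff0 despite being numerically smaller. */
        printf("%d\n", after(0x00000005u, 0xfffffff0u));  /* 1 */
        printf("%d\n", before(0xfffffff0u, 0x00000005u)); /* 1 */
        return 0;
    }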
1044 struct sk_buff *skb; in tcp_mark_lost_retrans() local
1054 tcp_for_write_queue(skb, sk) { in tcp_mark_lost_retrans()
1055 u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; in tcp_mark_lost_retrans()
1057 if (skb == tcp_send_head(sk)) in tcp_mark_lost_retrans()
1061 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_mark_lost_retrans()
1064 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) in tcp_mark_lost_retrans()
1079 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; in tcp_mark_lost_retrans()
1080 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_lost_retrans()
1082 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_mark_lost_retrans()
1087 cnt += tcp_skb_pcount(skb); in tcp_mark_lost_retrans()
1145 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1153 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && in tcp_match_skb_to_sack()
1154 !before(end_seq, TCP_SKB_CB(skb)->end_seq); in tcp_match_skb_to_sack()
1156 if (tcp_skb_pcount(skb) > 1 && !in_sack && in tcp_match_skb_to_sack()
1157 after(TCP_SKB_CB(skb)->end_seq, start_seq)) { in tcp_match_skb_to_sack()
1158 mss = tcp_skb_mss(skb); in tcp_match_skb_to_sack()
1159 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); in tcp_match_skb_to_sack()
1162 pkt_len = start_seq - TCP_SKB_CB(skb)->seq; in tcp_match_skb_to_sack()
1166 pkt_len = end_seq - TCP_SKB_CB(skb)->seq; in tcp_match_skb_to_sack()
1178 if (new_len >= skb->len) in tcp_match_skb_to_sack()
1183 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
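The in_sack computation shown above in tcp_match_skb_to_sack() is a containment test: a segment [seq, end_seq) is fully covered by a SACK block iff the block starts at or before the segment and ends at or after it. A self-contained sketch (before()/after() as in the earlier block):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    static bool in_sack(uint32_t seq, uint32_t end_seq,
                        uint32_t start_seq, uint32_t sack_end)
    {
        return !after(start_seq, seq) && !before(sack_end, end_seq);
    }

    int main(void)
    {
        printf("%d\n", in_sack(1000, 2448, 1000, 4000)); /* 1: covered */
        printf("%d\n", in_sack(1000, 2448, 2000, 4000)); /* 0: partial,
                            the kernel would tcp_fragment() here */
        return 0;
    }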
1282 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1288 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1289 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ in tcp_shifted_skb()
1300 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1302 &skb->skb_mstamp); in tcp_shifted_skb()
1304 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1308 TCP_SKB_CB(skb)->seq += shifted; in tcp_shifted_skb()
1311 BUG_ON(tcp_skb_pcount(skb) < pcount); in tcp_shifted_skb()
1312 tcp_skb_pcount_add(skb, -pcount); in tcp_shifted_skb()
1325 if (tcp_skb_pcount(skb) <= 1) { in tcp_shifted_skb()
1326 skb_shinfo(skb)->gso_size = 0; in tcp_shifted_skb()
1327 skb_shinfo(skb)->gso_type = 0; in tcp_shifted_skb()
1331 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); in tcp_shifted_skb()
1333 if (skb->len > 0) { in tcp_shifted_skb()
1334 BUG_ON(!tcp_skb_pcount(skb)); in tcp_shifted_skb()
1341 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1343 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1348 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_shifted_skb()
1349 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_shifted_skb()
1352 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1353 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1355 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1356 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1366 static int tcp_skb_seglen(const struct sk_buff *skb) in tcp_skb_seglen() argument
1368 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); in tcp_skb_seglen()
1372 static int skb_can_shift(const struct sk_buff *skb) in skb_can_shift() argument
1374 return !skb_headlen(skb) && skb_is_nonlinear(skb); in skb_can_shift()
1380 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1397 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) in tcp_shift_skb_data()
1399 if (!skb_can_shift(skb)) in tcp_shift_skb_data()
1402 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1406 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1408 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1413 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && in tcp_shift_skb_data()
1414 !before(end_seq, TCP_SKB_CB(skb)->end_seq); in tcp_shift_skb_data()
1417 len = skb->len; in tcp_shift_skb_data()
1418 pcount = tcp_skb_pcount(skb); in tcp_shift_skb_data()
1419 mss = tcp_skb_seglen(skb); in tcp_shift_skb_data()
1427 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) in tcp_shift_skb_data()
1433 if (tcp_skb_pcount(skb) <= 1) in tcp_shift_skb_data()
1436 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); in tcp_shift_skb_data()
1452 len = end_seq - TCP_SKB_CB(skb)->seq; in tcp_shift_skb_data()
1454 BUG_ON(len > skb->len); in tcp_shift_skb_data()
1460 mss = tcp_skb_mss(skb); in tcp_shift_skb_data()
1479 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1482 if (!skb_shift(prev, skb, len)) in tcp_shift_skb_data()
1484 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1492 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1494 if (!skb_can_shift(skb) || in tcp_shift_skb_data()
1495 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1496 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || in tcp_shift_skb_data()
1497 (mss != tcp_skb_seglen(skb))) in tcp_shift_skb_data()
1500 len = skb->len; in tcp_shift_skb_data()
1501 if (skb_shift(prev, skb, len)) { in tcp_shift_skb_data()
1502 pcount += tcp_skb_pcount(skb); in tcp_shift_skb_data()
1503 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); in tcp_shift_skb_data()
1511 return skb; in tcp_shift_skb_data()
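tcp_shift_skb_data()/tcp_shifted_skb() above move `shifted` bytes (pcount packets) from an skb into the previous, already-SACKed skb, then fix up the sequence and packet-count bookkeeping and free the skb once empty. A toy sketch of just that bookkeeping; the struct is an illustrative stand-in for the skb plus TCP_SKB_CB pair, and the actual byte movement (skb_shift()) is elided:

    #include <stdio.h>
    #include <stdint.h>

    struct seg {
        uint32_t seq, end_seq;  /* sequence range covered */
        int len;                /* payload bytes          */
        int pcount;             /* MSS-sized packet count */
    };

    static void shift_bytes(struct seg *prev, struct seg *skb,
                            int shifted, int pcount)
    {
        prev->end_seq += shifted;   /* prev now covers the moved bytes */
        prev->len     += shifted;
        prev->pcount  += pcount;

        skb->seq      += shifted;   /* skb shrinks from the front      */
        skb->len      -= shifted;
        skb->pcount   -= pcount;
        /* if skb->len drops to 0 the kernel unlinks and frees it */
    }

    int main(void)
    {
        struct seg prev = { 1000, 2448, 1448, 1 };
        struct seg skb  = { 2448, 5344, 2896, 2 };

        shift_bytes(&prev, &skb, 1448, 1);
        printf("prev [%u,%u) pcount=%d, skb [%u,%u) pcount=%d\n",
               prev.seq, prev.end_seq, prev.pcount,
               skb.seq, skb.end_seq, skb.pcount);
        return 0;
    }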
1518 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1527 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1531 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1535 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) in tcp_sacktag_walk()
1539 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in tcp_sacktag_walk()
1540 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1552 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1555 if (tmp != skb) { in tcp_sacktag_walk()
1556 skb = tmp; in tcp_sacktag_walk()
1562 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1572 TCP_SKB_CB(skb)->sacked = in tcp_sacktag_walk()
1575 TCP_SKB_CB(skb)->sacked, in tcp_sacktag_walk()
1576 TCP_SKB_CB(skb)->seq, in tcp_sacktag_walk()
1577 TCP_SKB_CB(skb)->end_seq, in tcp_sacktag_walk()
1579 tcp_skb_pcount(skb), in tcp_sacktag_walk()
1580 &skb->skb_mstamp); in tcp_sacktag_walk()
1582 if (!before(TCP_SKB_CB(skb)->seq, in tcp_sacktag_walk()
1584 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1587 state->fack_count += tcp_skb_pcount(skb); in tcp_sacktag_walk()
1589 return skb; in tcp_sacktag_walk()
1595 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1599 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1600 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1603 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) in tcp_sacktag_skip()
1606 state->fack_count += tcp_skb_pcount(skb); in tcp_sacktag_skip()
1608 return skb; in tcp_sacktag_skip()
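tcp_sacktag_skip() (the lines just above) advances from the current queue position past every segment ending at or before skip_to_seq; tcp_sacktag_walk() then takes over tagging up to the SACK block's end. A toy singly linked version of the skip:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    struct seg { uint32_t seq, end_seq; struct seg *next; };

    static struct seg *skip_to(struct seg *s, uint32_t skip_to_seq)
    {
        while (s && !after(s->end_seq, skip_to_seq))
            s = s->next;           /* segment lies entirely below skip_to_seq */
        return s;
    }

    int main(void)
    {
        struct seg c = { 4000, 5448, NULL };
        struct seg b = { 2552, 4000, &c };
        struct seg a = { 1000, 2552, &b };

        struct seg *s = skip_to(&a, 4000);
        printf("walk starts at [%u,%u)\n", s->seq, s->end_seq); /* [4000,5448) */
        return 0;
    }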
1611 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, in tcp_maybe_skipping_dsack() argument
1618 return skb; in tcp_maybe_skipping_dsack()
1621 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1622 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1627 return skb; in tcp_maybe_skipping_dsack()
1646 struct sk_buff *skb; in tcp_sacktag_write_queue() local
1730 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1765 skb = tcp_sacktag_skip(skb, sk, &state, in tcp_sacktag_write_queue()
1767 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1778 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1785 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1786 if (!skb) in tcp_sacktag_write_queue()
1793 skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); in tcp_sacktag_write_queue()
1800 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1801 if (!skb) in tcp_sacktag_write_queue()
1805 skb = tcp_sacktag_skip(skb, sk, &state, start_seq); in tcp_sacktag_write_queue()
1808 skb = tcp_sacktag_walk(skb, sk, next_dup, &state, in tcp_sacktag_write_queue()
1926 struct sk_buff *skb; in tcp_enter_loss() local
1950 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
1951 is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); in tcp_enter_loss()
1959 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
1960 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
1963 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; in tcp_enter_loss()
1964 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { in tcp_enter_loss()
1965 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; in tcp_enter_loss()
1966 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; in tcp_enter_loss()
1967 tp->lost_out += tcp_skb_pcount(skb); in tcp_enter_loss()
1968 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_enter_loss()
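The tcp_enter_loss() lines above first strip every tag except SACKED_ACKED, then mark each segment LOST unless it is SACKed and the receiver did not renege. A sketch of exactly that bit manipulation; the flag values follow include/net/tcp.h:

    #include <stdio.h>
    #include <stdbool.h>

    #define TCPCB_SACKED_ACKED   0x01
    #define TCPCB_SACKED_RETRANS 0x02
    #define TCPCB_LOST           0x04
    #define TCPCB_TAGBITS        0x07

    static int enter_loss_tag(int sacked, bool is_reneg)
    {
        sacked &= ~TCPCB_TAGBITS | TCPCB_SACKED_ACKED; /* keep only ACKED */
        if (!(sacked & TCPCB_SACKED_ACKED) || is_reneg) {
            sacked &= ~TCPCB_SACKED_ACKED;
            sacked |= TCPCB_LOST;
        }
        return sacked;
    }

    int main(void)
    {
        printf("%#x\n", enter_loss_tag(TCPCB_SACKED_RETRANS, false)); /* 0x4 */
        printf("%#x\n", enter_loss_tag(TCPCB_SACKED_ACKED, false));   /* 0x1 */
        printf("%#x\n", enter_loss_tag(TCPCB_SACKED_ACKED, true));    /* 0x4 */
        return 0;
    }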
2217 struct sk_buff *skb; in tcp_mark_head_lost() local
2226 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2229 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2232 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2236 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2237 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2241 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2244 if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) in tcp_mark_head_lost()
2249 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_mark_head_lost()
2250 cnt += tcp_skb_pcount(skb); in tcp_mark_head_lost()
2254 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || in tcp_mark_head_lost()
2258 mss = skb_shinfo(skb)->gso_size; in tcp_mark_head_lost()
2259 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, in tcp_mark_head_lost()
2266 tcp_skb_mark_lost(tp, skb); in tcp_mark_head_lost()
2335 struct sk_buff *skb; in tcp_any_retrans_done() local
2340 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2341 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) in tcp_any_retrans_done()
2382 struct sk_buff *skb; in tcp_undo_cwnd_reduction() local
2384 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2385 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2387 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; in tcp_undo_cwnd_reduction()
2627 struct sk_buff *skb; in tcp_simple_retransmit() local
2631 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2632 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2634 if (tcp_skb_seglen(skb) > mss && in tcp_simple_retransmit()
2635 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { in tcp_simple_retransmit()
2636 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_simple_retransmit()
2637 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; in tcp_simple_retransmit()
2638 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_simple_retransmit()
2640 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_simple_retransmit()
2984 struct sk_buff *skb = tcp_write_queue_head(sk); in tcp_rearm_rto() local
2986 tcp_skb_timestamp(skb) + rto; in tcp_rearm_rto()
3018 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3023 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3025 packets_acked = tcp_skb_pcount(skb); in tcp_tso_acked()
3026 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3028 packets_acked -= tcp_skb_pcount(skb); in tcp_tso_acked()
3031 BUG_ON(tcp_skb_pcount(skb) == 0); in tcp_tso_acked()
3032 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); in tcp_tso_acked()
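tcp_tso_acked() above handles a multi-packet segment only partially covered by snd_una: tcp_trim_head() drops the acked prefix, and the acked packet count is the pcount difference before and after the trim. A sketch of that accounting; the pcount arithmetic below is an illustrative approximation (the kernel caches the count in the skb rather than recomputing it):

    #include <stdio.h>
    #include <stdint.h>

    struct seg { uint32_t seq, end_seq; int mss; };

    static int pcount(const struct seg *s)
    {
        return (s->end_seq - s->seq + s->mss - 1) / s->mss;
    }

    static int tso_acked(struct seg *s, uint32_t snd_una)
    {
        int packets_acked = pcount(s);

        s->seq = snd_una;              /* tcp_trim_head() analogue */
        return packets_acked - pcount(s);
    }

    int main(void)
    {
        struct seg s = { 1000, 1000 + 4 * 1448, 1448 }; /* 4 packets */
        int acked = tso_acked(&s, 1000 + 2 * 1448);

        printf("acked %d packets, %d left\n", acked, pcount(&s)); /* 2, 2 */
        return 0;
    }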
3038 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3047 shinfo = skb_shinfo(skb); in tcp_ack_tstamp()
3050 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3068 struct sk_buff *skb; in tcp_clean_rtx_queue() local
3075 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3076 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); in tcp_clean_rtx_queue()
3080 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3084 if (tcp_skb_pcount(skb) == 1 || in tcp_clean_rtx_queue()
3088 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3095 prefetchw(skb->next); in tcp_clean_rtx_queue()
3096 acked_pcount = tcp_skb_pcount(skb); in tcp_clean_rtx_queue()
3104 last_ackt = skb->skb_mstamp; in tcp_clean_rtx_queue()
3139 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3140 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3141 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3143 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3150 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_clean_rtx_queue()
3192 } else if (skb && rtt_update && sack_rtt_us >= 0 && in tcp_clean_rtx_queue()
3193 sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) { in tcp_clean_rtx_queue()
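The tcp_clean_rtx_queue() lines above show the loop shape: keep taking the write-queue head while it is fully acknowledged, unlinking and freeing each segment. A toy version of that loop, with the RTT sampling, timestamp, and sacked-flag accounting omitted:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    struct seg { uint32_t seq, end_seq; struct seg *next; };

    static struct seg *clean_rtx_queue(struct seg *head, uint32_t snd_una)
    {
        while (head && !after(head->end_seq, snd_una)) {
            struct seg *next = head->next;
            free(head);                /* sk_wmem_free_skb() analogue */
            head = next;
        }
        return head;                   /* first not-fully-acked segment */
    }

    int main(void)
    {
        struct seg *b = malloc(sizeof *b);
        struct seg *a = malloc(sizeof *a);
        *a = (struct seg){ 1000, 2448, b };
        *b = (struct seg){ 2448, 3896, NULL };

        struct seg *head = clean_rtx_queue(a, 2448);
        printf("head now [%u,%u)\n", head->seq, head->end_seq); /* [2448,3896) */
        free(head);
        return 0;
    }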
3311 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3316 u32 nwin = ntohs(tcp_hdr(skb)->window); in tcp_ack_update_window()
3318 if (likely(!tcp_hdr(skb)->syn)) in tcp_ack_update_window()
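tcp_ack_update_window() reads the raw 16-bit window from the header and, per RFC 7323, scales it by the negotiated shift on everything except SYN segments. A sketch of that computation; the snd_wscale handling is standard but not shown in the listing itself:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static uint32_t ack_window(uint16_t raw_window, int snd_wscale, bool syn)
    {
        uint32_t nwin = raw_window;   /* already host order here */

        if (!syn)
            nwin <<= snd_wscale;      /* scale never applies to SYNs */
        return nwin;
    }

    int main(void)
    {
        printf("%u\n", ack_window(512, 7, false)); /* 65536 */
        printf("%u\n", ack_window(512, 7, true));  /* 512: SYN unscaled */
        return 0;
    }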
3353 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, in tcp_oow_rate_limited() argument
3357 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && in tcp_oow_rate_limited()
3358 !tcp_hdr(skb)->syn) in tcp_oow_rate_limited()
3377 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) in tcp_send_challenge_ack() argument
3386 if (tcp_oow_rate_limited(sock_net(sk), skb, in tcp_send_challenge_ack()
3465 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3470 u32 ack_seq = TCP_SKB_CB(skb)->seq; in tcp_ack()
3471 u32 ack = TCP_SKB_CB(skb)->ack_seq; in tcp_ack()
3488 tcp_send_challenge_ack(sk, skb); in tcp_ack()
3515 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3532 if (ack_seq != TCP_SKB_CB(skb)->end_seq) in tcp_ack()
3537 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3539 if (TCP_SKB_CB(skb)->sacked) in tcp_ack()
3540 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3543 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3616 if (TCP_SKB_CB(skb)->sacked) { in tcp_ack()
3617 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3648 void tcp_parse_options(const struct sk_buff *skb, in tcp_parse_options() argument
3653 const struct tcphdr *th = tcp_hdr(skb); in tcp_parse_options()
3722 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; in tcp_parse_options()
3781 static bool tcp_fast_parse_options(const struct sk_buff *skb, in tcp_fast_parse_options() argument
3796 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
3864 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3867 const struct tcphdr *th = tcp_hdr(skb); in tcp_disordered_ack()
3868 u32 seq = TCP_SKB_CB(skb)->seq; in tcp_disordered_ack()
3869 u32 ack = TCP_SKB_CB(skb)->ack_seq; in tcp_disordered_ack()
3872 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
3885 const struct sk_buff *skb) in tcp_paws_discard() argument
3890 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
4066 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
4070 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_send_dupack()
4071 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4076 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_send_dupack()
4078 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4080 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4236 struct sk_buff *skb, *tail; in tcp_ofo_queue() local
4239 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { in tcp_ofo_queue()
4240 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4243 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { in tcp_ofo_queue()
4245 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) in tcp_ofo_queue()
4246 dsack_high = TCP_SKB_CB(skb)->end_seq; in tcp_ofo_queue()
4247 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4250 __skb_unlink(skb, &tp->out_of_order_queue); in tcp_ofo_queue()
4251 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_ofo_queue()
4253 __kfree_skb(skb); in tcp_ofo_queue()
4257 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_ofo_queue()
4258 TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4261 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); in tcp_ofo_queue()
4262 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4264 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4265 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_ofo_queue()
4268 kfree_skb_partial(skb, fragstolen); in tcp_ofo_queue()
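tcp_ofo_queue() above drains the out-of-order queue: while the head segment is contiguous with rcv_nxt, splice it to the receive queue (or drop it when it is entirely old data). A toy drain loop, with the DSACK extension, coalescing, and FIN handling omitted:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    struct seg { uint32_t seq, end_seq; struct seg *next; };

    static uint32_t ofo_drain(struct seg **ofo, uint32_t rcv_nxt)
    {
        while (*ofo && !after((*ofo)->seq, rcv_nxt)) {
            struct seg *s = *ofo;
            *ofo = s->next;
            if (after(s->end_seq, rcv_nxt))
                rcv_nxt = s->end_seq;  /* delivered: advance rcv_nxt */
            /* else: fully duplicate; the kernel frees it and notes a DSACK */
        }
        return rcv_nxt;
    }

    int main(void)
    {
        struct seg b = { 2448, 3896, NULL };
        struct seg a = { 1000, 2448, &b };
        struct seg *ofo = &a;

        printf("rcv_nxt -> %u\n", ofo_drain(&ofo, 1000)); /* 3896 */
        return 0;
    }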
4275 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4279 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4284 if (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4288 if (!sk_rmem_schedule(sk, skb, size)) in tcp_try_rmem_schedule()
4295 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4301 tcp_ecn_check_ce(tp, skb); in tcp_data_queue_ofo()
4303 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4305 __kfree_skb(skb); in tcp_data_queue_ofo()
4315 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue_ofo()
4322 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; in tcp_data_queue_ofo()
4324 TCP_SKB_CB(skb)->end_seq; in tcp_data_queue_ofo()
4326 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4330 seq = TCP_SKB_CB(skb)->seq; in tcp_data_queue_ofo()
4331 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_data_queue_ofo()
4336 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { in tcp_data_queue_ofo()
4337 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4339 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4340 kfree_skb_partial(skb, fragstolen); in tcp_data_queue_ofo()
4341 skb = NULL; in tcp_data_queue_ofo()
4369 __kfree_skb(skb); in tcp_data_queue_ofo()
4370 skb = NULL; in tcp_data_queue_ofo()
4389 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4391 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4394 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { in tcp_data_queue_ofo()
4395 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4415 if (skb) { in tcp_data_queue_ofo()
4416 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4417 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4421 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4427 __skb_pull(skb, hdrlen); in tcp_queue_rcv()
4429 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; in tcp_queue_rcv()
4430 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); in tcp_queue_rcv()
4432 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4433 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4440 struct sk_buff *skb; in tcp_send_rcvq() local
4454 skb = alloc_skb_with_frags(size - data_len, data_len, in tcp_send_rcvq()
4457 if (!skb) in tcp_send_rcvq()
4460 skb_put(skb, size - data_len); in tcp_send_rcvq()
4461 skb->data_len = data_len; in tcp_send_rcvq()
4462 skb->len = size; in tcp_send_rcvq()
4464 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4467 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); in tcp_send_rcvq()
4471 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4472 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; in tcp_send_rcvq()
4473 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4475 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4477 __kfree_skb(skb); in tcp_send_rcvq()
4482 kfree_skb(skb); in tcp_send_rcvq()
4488 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4494 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) in tcp_data_queue()
4497 skb_dst_drop(skb); in tcp_data_queue()
4498 __skb_pull(skb, tcp_hdr(skb)->doff * 4); in tcp_data_queue()
4500 tcp_ecn_accept_cwr(tp, skb); in tcp_data_queue()
4508 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
4516 int chunk = min_t(unsigned int, skb->len, in tcp_data_queue()
4522 if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { in tcp_data_queue()
4525 eaten = (chunk == skb->len); in tcp_data_queue()
4534 tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4537 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4539 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4540 if (skb->len) in tcp_data_queue()
4541 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4542 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_data_queue()
4561 kfree_skb_partial(skb, fragstolen); in tcp_data_queue()
4567 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
4570 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4576 __kfree_skb(skb); in tcp_data_queue()
4581 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
4586 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
4589 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_data_queue()
4590 TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4592 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4602 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
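The tcp_data_queue() hits above trace its dispatch on where a segment falls relative to rcv_nxt and the receive window: in order, entirely old, beyond the window, partially old, or future data. A sketch of that classification in the same order the kernel tests it:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    static const char *classify(uint32_t seq, uint32_t end_seq,
                                uint32_t rcv_nxt, uint32_t rcv_wnd)
    {
        if (seq == rcv_nxt)
            return "in order: queue to sk_receive_queue";
        if (!after(end_seq, rcv_nxt))
            return "entirely old: send DSACK, drop";
        if (!before(seq, rcv_nxt + rcv_wnd))
            return "beyond window: drop, force ACK";
        if (before(seq, rcv_nxt))
            return "partial overlap: DSACK old part, queue the rest";
        return "future data: out-of-order queue";
    }

    int main(void)
    {
        printf("%s\n", classify(1000, 2448, 1000, 65535));
        printf("%s\n", classify(500,  900,  1000, 65535));
        printf("%s\n", classify(4000, 5448, 1000, 65535));
        return 0;
    }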
4605 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4610 if (!skb_queue_is_last(list, skb)) in tcp_collapse_one()
4611 next = skb_queue_next(list, skb); in tcp_collapse_one()
4613 __skb_unlink(skb, list); in tcp_collapse_one()
4614 __kfree_skb(skb); in tcp_collapse_one()
4633 struct sk_buff *skb, *n; in tcp_collapse() local
4638 skb = head; in tcp_collapse()
4641 skb_queue_walk_from_safe(list, skb, n) { in tcp_collapse()
4642 if (skb == tail) in tcp_collapse()
4645 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { in tcp_collapse()
4646 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4647 if (!skb) in tcp_collapse()
4657 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && in tcp_collapse()
4658 (tcp_win_from_space(skb->truesize) > skb->len || in tcp_collapse()
4659 before(TCP_SKB_CB(skb)->seq, start))) { in tcp_collapse()
4664 if (!skb_queue_is_last(list, skb)) { in tcp_collapse()
4665 struct sk_buff *next = skb_queue_next(list, skb); in tcp_collapse()
4667 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { in tcp_collapse()
4674 start = TCP_SKB_CB(skb)->end_seq; in tcp_collapse()
4677 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) in tcp_collapse()
4688 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); in tcp_collapse()
4690 __skb_queue_before(list, skb, nskb); in tcp_collapse()
4695 int offset = start - TCP_SKB_CB(skb)->seq; in tcp_collapse()
4696 int size = TCP_SKB_CB(skb)->end_seq - start; in tcp_collapse()
4701 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) in tcp_collapse()
4707 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { in tcp_collapse()
4708 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4709 if (!skb || in tcp_collapse()
4710 skb == tail || in tcp_collapse()
4711 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) in tcp_collapse()
4724 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); in tcp_collapse_ofo_queue() local
4728 if (!skb) in tcp_collapse_ofo_queue()
4731 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4732 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
4733 head = skb; in tcp_collapse_ofo_queue()
4738 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) in tcp_collapse_ofo_queue()
4739 next = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_collapse_ofo_queue()
4740 skb = next; in tcp_collapse_ofo_queue()
4744 if (!skb || in tcp_collapse_ofo_queue()
4745 after(TCP_SKB_CB(skb)->seq, end) || in tcp_collapse_ofo_queue()
4746 before(TCP_SKB_CB(skb)->end_seq, start)) { in tcp_collapse_ofo_queue()
4748 head, skb, start, end); in tcp_collapse_ofo_queue()
4749 head = skb; in tcp_collapse_ofo_queue()
4750 if (!skb) in tcp_collapse_ofo_queue()
4753 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4754 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
4756 if (before(TCP_SKB_CB(skb)->seq, start)) in tcp_collapse_ofo_queue()
4757 start = TCP_SKB_CB(skb)->seq; in tcp_collapse_ofo_queue()
4758 if (after(TCP_SKB_CB(skb)->end_seq, end)) in tcp_collapse_ofo_queue()
4759 end = TCP_SKB_CB(skb)->end_seq; in tcp_collapse_ofo_queue()
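tcp_collapse_ofo_queue() above scans the out-of-order queue, growing a [start, end) range while successive segments overlap it and handing each maximal run to tcp_collapse() when a gap appears. A sketch of that range merging over an in-memory array:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    struct seg { uint32_t seq, end_seq; };

    int main(void)
    {
        struct seg q[] = { {1000, 2000}, {1500, 2500}, {4000, 5000} };
        uint32_t start = q[0].seq, end = q[0].end_seq;

        for (unsigned i = 1; i < sizeof(q) / sizeof(q[0]); i++) {
            if (after(q[i].seq, end) || before(q[i].end_seq, start)) {
                printf("collapse run [%u,%u)\n", start, end);
                start = q[i].seq;        /* gap: start a new run */
                end = q[i].end_seq;
                continue;
            }
            if (before(q[i].seq, start))
                start = q[i].seq;        /* extend the run downward */
            if (after(q[i].end_seq, end))
                end = q[i].end_seq;      /* and upward */
        }
        printf("collapse run [%u,%u)\n", start, end);
        return 0;
    }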
4995 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg() local
4997 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
4998 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
4999 __kfree_skb(skb); in tcp_check_urg()
5011 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
5025 if (ptr < skb->len) { in tcp_urg()
5027 if (skb_copy_bits(skb, ptr, &tmp, 1)) in tcp_urg()
5036 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) in tcp_copy_to_iovec() argument
5039 int chunk = skb->len - hlen; in tcp_copy_to_iovec()
5043 if (skb_csum_unnecessary(skb)) in tcp_copy_to_iovec()
5044 err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); in tcp_copy_to_iovec()
5046 err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); in tcp_copy_to_iovec()
5059 struct sk_buff *skb) in __tcp_checksum_complete_user() argument
5065 result = __tcp_checksum_complete(skb); in __tcp_checksum_complete_user()
5068 result = __tcp_checksum_complete(skb); in __tcp_checksum_complete_user()
5074 struct sk_buff *skb) in tcp_checksum_complete_user() argument
5076 return !skb_csum_unnecessary(skb) && in tcp_checksum_complete_user()
5077 __tcp_checksum_complete_user(sk, skb); in tcp_checksum_complete_user()
5083 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5089 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5090 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5093 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5096 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5103 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5113 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5116 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5129 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) in tcp_validate_incoming()
5132 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5146 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5153 __kfree_skb(skb); in tcp_validate_incoming()
5180 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5186 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5214 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5215 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5255 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5256 __kfree_skb(skb); in tcp_rcv_established()
5273 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { in tcp_rcv_established()
5284 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5286 __skb_pull(skb, tcp_header_len); in tcp_rcv_established()
5287 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_rcv_established()
5293 if (tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5296 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5308 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5313 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5317 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5319 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5321 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5330 kfree_skb_partial(skb, fragstolen); in tcp_rcv_established()
5337 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5347 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5351 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5354 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5357 tcp_urg(sk, skb, th); in tcp_rcv_established()
5360 tcp_data_queue(sk, skb); in tcp_rcv_established()
5371 __kfree_skb(skb); in tcp_rcv_established()
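The tcp_rcv_established() hits above show the two fast-path preconditions visible in this listing: the segment starts exactly at rcv_nxt and its ACK does not advance past snd_nxt. (The full gate also compares the cached pred_flags against the header, which these hits do not show.) A sketch of just the visible checks:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    #define after(b, a) before(a, b)

    static bool fast_path_ok(uint32_t seq, uint32_t ack_seq,
                             uint32_t rcv_nxt, uint32_t snd_nxt)
    {
        return seq == rcv_nxt && !after(ack_seq, snd_nxt);
    }

    int main(void)
    {
        printf("%d\n", fast_path_ok(1000, 5000, 1000, 5000)); /* 1 */
        printf("%d\n", fast_path_ok(1001, 5000, 1000, 5000)); /* 0: slow path */
        return 0;
    }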
5375 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5382 if (skb) { in tcp_finish_connect()
5383 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5384 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5469 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5477 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
5490 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
5491 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) in tcp_rcv_synsent_state_process()
5533 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
5534 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5539 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5540 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5576 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5579 tcp_rcv_fastopen_synack(sk, skb, &foc)) in tcp_rcv_synsent_state_process()
5599 __kfree_skb(skb); in tcp_rcv_synsent_state_process()
5640 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5642 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5648 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
5697 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_state_process() argument
5723 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5743 kfree_skb(skb); in tcp_rcv_state_process()
5749 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); in tcp_rcv_state_process()
5754 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5755 __kfree_skb(skb); in tcp_rcv_state_process()
5765 if (!tcp_check_req(sk, skb, req, true)) in tcp_rcv_state_process()
5772 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5776 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5812 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
5814 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
5881 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_rcv_state_process()
5882 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { in tcp_rcv_state_process()
5923 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5930 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_rcv_state_process()
5939 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && in tcp_rcv_state_process()
5940 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
5948 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
5961 __kfree_skb(skb); in tcp_rcv_state_process()
5994 const struct sk_buff *skb, in tcp_ecn_create_request() argument
5998 const struct tcphdr *th = tcp_hdr(skb); in tcp_ecn_create_request()
6006 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); in tcp_ecn_create_request()
6015 struct sk_buff *skb, const struct sock *sk) in tcp_openreq_init() argument
6021 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; in tcp_openreq_init()
6022 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_openreq_init()
6033 ireq->ir_rmt_port = tcp_hdr(skb)->source; in tcp_openreq_init()
6034 ireq->ir_num = ntohs(tcp_hdr(skb)->dest); in tcp_openreq_init()
6035 ireq->ir_mark = inet_request_mark(sk, skb); in tcp_openreq_init()
6062 const struct sk_buff *skb, in tcp_syn_flood_action() argument
6082 proto, ntohs(tcp_hdr(skb)->dest), msg); in tcp_syn_flood_action()
6089 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
6095 __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; in tcp_conn_request()
6108 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
6133 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); in tcp_conn_request()
6139 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
6144 af_ops->init_req(req, sk, skb); in tcp_conn_request()
6146 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
6184 pr_drop_req(req, ntohs(tcp_hdr(skb)->source), in tcp_conn_request()
6189 isn = af_ops->init_seq(skb); in tcp_conn_request()
6197 tcp_ecn_create_request(req, skb, sk, dst); in tcp_conn_request()
6200 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6209 tcp_try_fastopen(sk, skb, req, &foc, dst); in tcp_conn_request()
6211 skb_get_queue_mapping(skb), &foc); in tcp_conn_request()