Lines matching refs: sk (uses of the struct sock pointer across the TCP input path, net/ipv4/tcp_input.c)

132 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)  in tcp_measure_rcv_mss()  argument
134 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
165 len -= tcp_sk(sk)->tcp_header_len; in tcp_measure_rcv_mss()
178 static void tcp_incr_quickack(struct sock *sk) in tcp_incr_quickack() argument
180 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
181 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
189 static void tcp_enter_quickack_mode(struct sock *sk) in tcp_enter_quickack_mode() argument
191 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
192 tcp_incr_quickack(sk); in tcp_enter_quickack_mode()
201 static bool tcp_in_quickack_mode(struct sock *sk) in tcp_in_quickack_mode() argument
203 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
204 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_in_quickack_mode()
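
The quick-ACK helpers above size a burst of immediate ACKs from the receive window and the estimated peer MSS (line 181). A minimal standalone sketch of that sizing rule follows; the floor of 2 and the cap of 16 are assumptions modelled on the kernel's TCP_MAX_QUICKACKS-style clamping, not values taken from this listing.

#include <stdio.h>

/* Sketch only: quickacks = rcv_wnd / (2 * rcv_mss), clamped. */
static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
                quickacks = 2;          /* always allow a small burst */
        if (quickacks > 16)
                quickacks = 16;         /* assumed upper bound        */
        return quickacks;
}

int main(void)
{
        printf("%u\n", quickack_budget(65535, 1460));   /* large window: capped  */
        printf("%u\n", quickack_budget(8192, 1460));    /* small window: minimum */
        return 0;
}
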
287 static void tcp_sndbuf_expand(struct sock *sk) in tcp_sndbuf_expand() argument
289 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand()
312 if (sk->sk_sndbuf < sndmem) in tcp_sndbuf_expand()
313 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); in tcp_sndbuf_expand()
342 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) in __tcp_grow_window() argument
344 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window()
351 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
359 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) in tcp_grow_window() argument
361 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window()
365 (int)tp->rcv_ssthresh < tcp_space(sk) && in tcp_grow_window()
366 !tcp_under_memory_pressure(sk)) { in tcp_grow_window()
375 incr = __tcp_grow_window(sk, skb); in tcp_grow_window()
381 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
387 static void tcp_fixup_rcvbuf(struct sock *sk) in tcp_fixup_rcvbuf() argument
389 u32 mss = tcp_sk(sk)->advmss; in tcp_fixup_rcvbuf()
401 if (sk->sk_rcvbuf < rcvmem) in tcp_fixup_rcvbuf()
402 sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); in tcp_fixup_rcvbuf()
408 void tcp_init_buffer_space(struct sock *sk) in tcp_init_buffer_space() argument
410 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space()
413 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) in tcp_init_buffer_space()
414 tcp_fixup_rcvbuf(sk); in tcp_init_buffer_space()
415 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) in tcp_init_buffer_space()
416 tcp_sndbuf_expand(sk); in tcp_init_buffer_space()
422 maxwin = tcp_full_space(sk); in tcp_init_buffer_space()
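
tcp_sndbuf_expand(), tcp_fixup_rcvbuf() and tcp_init_buffer_space() above all follow the same pattern: grow a buffer toward an estimated target, never past the sysctl ceiling, and only if the application has not pinned the size via SO_SNDBUF/SO_RCVBUF. A hedged sketch of that pattern, with illustrative names (the real targets at lines 312-313 and 401-402 are derived from MSS and window estimates, which this sketch does not model):

/* "Grow, but respect the sysctl ceiling and user locks" (lines 312-313,
 * 401-402, 413-416). */
static int tcp_bufsize_grow(int current_size, int wanted, int sysctl_max,
                            int user_locked)
{
        if (user_locked)        /* SOCK_SNDBUF_LOCK / SOCK_RCVBUF_LOCK set */
                return current_size;
        if (current_size < wanted)
                current_size = wanted < sysctl_max ? wanted : sysctl_max;
        return current_size;
}
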
444 static void tcp_clamp_window(struct sock *sk) in tcp_clamp_window() argument
446 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window()
447 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
451 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && in tcp_clamp_window()
452 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && in tcp_clamp_window()
453 !tcp_under_memory_pressure(sk) && in tcp_clamp_window()
454 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { in tcp_clamp_window()
455 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), in tcp_clamp_window()
458 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) in tcp_clamp_window()
469 void tcp_initialize_rcv_mss(struct sock *sk) in tcp_initialize_rcv_mss() argument
471 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss()
478 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
542 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, in tcp_rcv_rtt_measure_ts() argument
545 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts()
548 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
556 void tcp_rcv_space_adjust(struct sock *sk) in tcp_rcv_space_adjust() argument
558 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust()
581 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in tcp_rcv_space_adjust()
608 if (rcvbuf > sk->sk_rcvbuf) { in tcp_rcv_space_adjust()
609 sk->sk_rcvbuf = rcvbuf; in tcp_rcv_space_adjust()
632 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) in tcp_event_data_recv() argument
634 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv()
635 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv()
638 inet_csk_schedule_ack(sk); in tcp_event_data_recv()
640 tcp_measure_rcv_mss(sk, skb); in tcp_event_data_recv()
650 tcp_incr_quickack(sk); in tcp_event_data_recv()
666 tcp_incr_quickack(sk); in tcp_event_data_recv()
667 sk_mem_reclaim(sk); in tcp_event_data_recv()
675 tcp_grow_window(sk, skb); in tcp_event_data_recv()
687 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) in tcp_rtt_estimator() argument
689 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator()
738 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
744 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
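
tcp_rtt_estimator() above maintains the classic smoothed RTT and mean deviation (Jacobson/Karels, RFC 6298), which tcp_set_rto() further down turns into the retransmission timer. A plain-microsecond sketch of that update; the 200 ms floor is an assumed stand-in for the per-destination rto_min the kernel actually uses:

#include <stdlib.h>

struct rtt_est {
        long srtt_us;           /* smoothed RTT            */
        long rttvar_us;         /* smoothed mean deviation */
};

/* Feed one RTT sample m_us; returns the resulting RTO in microseconds.
 * Gains of 1/8 and 1/4 follow RFC 6298. */
static long rtt_update(struct rtt_est *e, long m_us)
{
        long rto_us;

        if (e->srtt_us == 0) {                  /* first measurement */
                e->srtt_us = m_us;
                e->rttvar_us = m_us / 2;
        } else {
                long err = m_us - e->srtt_us;

                e->srtt_us += err / 8;
                e->rttvar_us += (labs(err) - e->rttvar_us) / 4;
        }
        rto_us = e->srtt_us + 4 * e->rttvar_us;
        return rto_us > 200000 ? rto_us : 200000;       /* assumed floor */
}
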
760 static void tcp_update_pacing_rate(struct sock *sk) in tcp_update_pacing_rate() argument
762 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate()
790 ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, in tcp_update_pacing_rate()
791 sk->sk_max_pacing_rate); in tcp_update_pacing_rate()
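
tcp_update_pacing_rate() above derives sk_pacing_rate, in bytes per second, from roughly cwnd * mss / srtt, scaled up while the flow is still in slow start. A sketch under the assumption that the scale factors mirror the 200%/120% defaults of the tcp_pacing_ss_ratio / tcp_pacing_ca_ratio knobs; the helper name is illustrative:

/* Bytes-per-second pacing estimate; the 200/120 ratios are assumptions. */
static unsigned long long pacing_rate(unsigned int mss, unsigned int cwnd,
                                      unsigned long srtt_us, int in_slow_start)
{
        unsigned long long rate;

        if (!srtt_us)
                return 0;
        rate = (unsigned long long)mss * cwnd * 1000000ULL;     /* bytes/sec numerator */
        rate = rate * (in_slow_start ? 200 : 120) / 100;
        return rate / srtt_us;
}
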
797 static void tcp_set_rto(struct sock *sk) in tcp_set_rto() argument
799 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto()
810 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
821 tcp_bound_rto(sk); in tcp_set_rto()
851 static void tcp_update_reordering(struct sock *sk, const int metric, in tcp_update_reordering() argument
854 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_reordering()
870 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_update_reordering()
873 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_update_reordering()
1051 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, in tcp_check_dsack() argument
1055 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack()
1063 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); in tcp_check_dsack()
1072 NET_INC_STATS_BH(sock_net(sk), in tcp_check_dsack()
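
tcp_check_dsack() above applies the RFC 2883 rule for recognising a duplicate-SACK report. A wraparound-safe sketch of that rule; the helper names are illustrative:

#include <stdint.h>

static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

/* RFC 2883: the first SACK block is a D-SACK if it starts below the
 * cumulative ACK, or if it is wholly contained in the second block. */
static int is_dsack(uint32_t ack_seq,
                    uint32_t s0_start, uint32_t s0_end,
                    int have_block1, uint32_t s1_start, uint32_t s1_end)
{
        if (seq_before(s0_start, ack_seq))
                return 1;
        if (have_block1 &&
            !seq_after(s0_end, s1_end) && !seq_before(s0_start, s1_start))
                return 1;
        return 0;
}
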
1106 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, in tcp_match_skb_to_sack() argument
1144 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); in tcp_match_skb_to_sack()
1153 static u8 tcp_sacktag_one(struct sock *sk, in tcp_sacktag_one() argument
1159 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one()
1240 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, in tcp_shifted_skb() argument
1245 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb()
1246 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); in tcp_shifted_skb()
1258 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, in tcp_shifted_skb()
1289 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); in tcp_shifted_skb()
1306 if (skb == tcp_highest_sack(sk)) in tcp_shifted_skb()
1307 tcp_advance_highest_sack(sk, skb); in tcp_shifted_skb()
1309 tcp_unlink_write_queue(skb, sk); in tcp_shifted_skb()
1310 sk_wmem_free_skb(sk, skb); in tcp_shifted_skb()
1312 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); in tcp_shifted_skb()
1334 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, in tcp_shift_skb_data() argument
1339 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data()
1346 if (!sk_can_gso(sk)) in tcp_shift_skb_data()
1360 if (unlikely(skb == tcp_write_queue_head(sk))) in tcp_shift_skb_data()
1362 prev = tcp_write_queue_prev(sk, skb); in tcp_shift_skb_data()
1438 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) in tcp_shift_skb_data()
1444 if (prev == tcp_write_queue_tail(sk)) in tcp_shift_skb_data()
1446 skb = tcp_write_queue_next(sk, prev); in tcp_shift_skb_data()
1449 (skb == tcp_send_head(sk)) || in tcp_shift_skb_data()
1457 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); in tcp_shift_skb_data()
1468 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); in tcp_shift_skb_data()
1472 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_walk() argument
1478 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk()
1481 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_walk()
1485 if (skb == tcp_send_head(sk)) in tcp_sacktag_walk()
1494 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1506 tmp = tcp_shift_skb_data(sk, skb, state, in tcp_sacktag_walk()
1516 in_sack = tcp_match_skb_to_sack(sk, skb, in tcp_sacktag_walk()
1527 tcp_sacktag_one(sk, in tcp_sacktag_walk()
1538 tcp_advance_highest_sack(sk, skb); in tcp_sacktag_walk()
1549 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, in tcp_sacktag_skip() argument
1553 tcp_for_write_queue_from(skb, sk) { in tcp_sacktag_skip()
1554 if (skb == tcp_send_head(sk)) in tcp_sacktag_skip()
1566 struct sock *sk, in tcp_maybe_skipping_dsack() argument
1575 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); in tcp_maybe_skipping_dsack()
1576 skb = tcp_sacktag_walk(skb, sk, NULL, state, in tcp_maybe_skipping_dsack()
1590 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, in tcp_sacktag_write_queue() argument
1593 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue()
1612 tcp_highest_sack_reset(sk); in tcp_sacktag_write_queue()
1615 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, in tcp_sacktag_write_queue()
1656 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_sacktag_write_queue()
1682 skb = tcp_write_queue_head(sk); in tcp_sacktag_write_queue()
1717 skb = tcp_sacktag_skip(skb, sk, state, in tcp_sacktag_write_queue()
1719 skb = tcp_sacktag_walk(skb, sk, next_dup, in tcp_sacktag_write_queue()
1730 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, in tcp_sacktag_write_queue()
1737 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1745 skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); in tcp_sacktag_write_queue()
1752 skb = tcp_highest_sack(sk); in tcp_sacktag_write_queue()
1757 skb = tcp_sacktag_skip(skb, sk, state, start_seq); in tcp_sacktag_write_queue()
1760 skb = tcp_sacktag_walk(skb, sk, next_dup, state, in tcp_sacktag_write_queue()
1776 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) in tcp_sacktag_write_queue()
1777 tcp_update_reordering(sk, tp->fackets_out - state->reord, 0); in tcp_sacktag_write_queue()
1812 static void tcp_check_reno_reordering(struct sock *sk, const int addend) in tcp_check_reno_reordering() argument
1814 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering()
1816 tcp_update_reordering(sk, tp->packets_out + addend, 0); in tcp_check_reno_reordering()
1821 static void tcp_add_reno_sack(struct sock *sk) in tcp_add_reno_sack() argument
1823 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack()
1825 tcp_check_reno_reordering(sk, 0); in tcp_add_reno_sack()
1831 static void tcp_remove_reno_sacks(struct sock *sk, int acked) in tcp_remove_reno_sacks() argument
1833 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks()
1842 tcp_check_reno_reordering(sk, acked); in tcp_remove_reno_sacks()
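
The helpers above emulate SACK accounting for plain Reno peers: each duplicate ACK is counted as one newly "sacked" segment, and on a cumulative ACK one segment is credited to filling the hole while the rest retire emulated SACKs. A hedged sketch (the bound in reno_add_sack is a simplification):

struct reno_fake_sack {
        unsigned int packets_out;
        unsigned int sacked_out;        /* emulated SACK count */
};

/* A duplicate ACK from a non-SACK peer stands in for one sacked segment. */
static void reno_add_sack(struct reno_fake_sack *s)
{
        if (s->sacked_out < s->packets_out)
                s->sacked_out++;
}

/* One acked segment fills the hole; the others retire emulated SACKs
 * (mirrors the "acked - 1" logic of tcp_remove_reno_sacks()). */
static void reno_remove_sacks(struct reno_fake_sack *s, unsigned int acked)
{
        if (!acked)
                return;
        if (acked - 1 >= s->sacked_out)
                s->sacked_out = 0;
        else
                s->sacked_out -= acked - 1;
}
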
1872 void tcp_enter_loss(struct sock *sk) in tcp_enter_loss() argument
1874 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss()
1875 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss()
1884 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1885 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1886 tcp_ca_event(sk, CA_EVENT_LOSS); in tcp_enter_loss()
1899 skb = tcp_write_queue_head(sk); in tcp_enter_loss()
1902 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); in tcp_enter_loss()
1908 tcp_for_write_queue(skb, sk) { in tcp_enter_loss()
1909 if (skb == tcp_send_head(sk)) in tcp_enter_loss()
1929 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_enter_loss()
1939 !inet_csk(sk)->icsk_mtup.probe_size; in tcp_enter_loss()
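
tcp_enter_loss() above is the RTO response: save the current ssthresh so the cut can be undone later, ask the congestion module for a new ssthresh, collapse cwnd to one segment, and mark the un-SACKed part of the write queue lost. A sketch of just the congestion-variable side, with a Reno-style halving as an assumption standing in for the ca_ops->ssthresh() hook at line 1885:

struct loss_cc {
        unsigned int snd_cwnd;
        unsigned int snd_ssthresh;
        unsigned int prior_ssthresh;    /* saved for a later undo */
};

static void enter_loss(struct loss_cc *cc, unsigned int packets_in_flight)
{
        unsigned int half = packets_in_flight / 2;

        cc->prior_ssthresh = cc->snd_ssthresh;          /* tcp_current_ssthresh()   */
        cc->snd_ssthresh = half > 2 ? half : 2;         /* assumed Reno-style cut   */
        cc->snd_cwnd = 1;                               /* restart from one segment */
        /* ...the write-queue walk then tags unacked, un-SACKed skbs as LOST. */
}
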
1952 static bool tcp_check_sack_reneging(struct sock *sk, int flag) in tcp_check_sack_reneging() argument
1955 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging()
1959 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_check_sack_reneging()
1991 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) in tcp_pause_early_retransmit() argument
1993 struct tcp_sock *tp = tcp_sk(sk); in tcp_pause_early_retransmit()
2007 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) in tcp_pause_early_retransmit()
2010 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, in tcp_pause_early_retransmit()
2108 static bool tcp_time_to_recover(struct sock *sk, int flag) in tcp_time_to_recover() argument
2110 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover()
2127 !tcp_may_send_now(sk)) { in tcp_time_to_recover()
2141 tcp_is_sack(tp) && !tcp_send_head(sk)) in tcp_time_to_recover()
2151 !tcp_may_send_now(sk)) in tcp_time_to_recover()
2152 return !tcp_pause_early_retransmit(sk, flag); in tcp_time_to_recover()
2163 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) in tcp_mark_head_lost() argument
2165 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost()
2178 if (mark_head && skb != tcp_write_queue_head(sk)) in tcp_mark_head_lost()
2181 skb = tcp_write_queue_head(sk); in tcp_mark_head_lost()
2185 tcp_for_write_queue_from(skb, sk) { in tcp_mark_head_lost()
2186 if (skb == tcp_send_head(sk)) in tcp_mark_head_lost()
2208 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, in tcp_mark_head_lost()
2225 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) in tcp_update_scoreboard() argument
2227 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard()
2230 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2235 tcp_mark_head_lost(sk, lost, 0); in tcp_update_scoreboard()
2239 tcp_mark_head_lost(sk, sacked_upto, 0); in tcp_update_scoreboard()
2241 tcp_mark_head_lost(sk, 1, 1); in tcp_update_scoreboard()
2296 static bool tcp_any_retrans_done(const struct sock *sk) in tcp_any_retrans_done() argument
2298 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done()
2304 skb = tcp_write_queue_head(sk); in tcp_any_retrans_done()
2312 static void DBGUNDO(struct sock *sk, const char *msg) in DBGUNDO() argument
2314 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO()
2315 struct inet_sock *inet = inet_sk(sk); in DBGUNDO()
2317 if (sk->sk_family == AF_INET) { in DBGUNDO()
2326 else if (sk->sk_family == AF_INET6) { in DBGUNDO()
2327 struct ipv6_pinfo *np = inet6_sk(sk); in DBGUNDO()
2341 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) in tcp_undo_cwnd_reduction() argument
2343 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction()
2348 tcp_for_write_queue(skb, sk) { in tcp_undo_cwnd_reduction()
2349 if (skb == tcp_send_head(sk)) in tcp_undo_cwnd_reduction()
2358 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction()
2361 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2382 static bool tcp_try_undo_recovery(struct sock *sk) in tcp_try_undo_recovery() argument
2384 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery()
2392 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); in tcp_try_undo_recovery()
2393 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_recovery()
2394 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) in tcp_try_undo_recovery()
2399 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_try_undo_recovery()
2406 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_recovery()
2410 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_recovery()
2415 static bool tcp_try_undo_dsack(struct sock *sk) in tcp_try_undo_dsack() argument
2417 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack()
2420 DBGUNDO(sk, "D-SACK"); in tcp_try_undo_dsack()
2421 tcp_undo_cwnd_reduction(sk, false); in tcp_try_undo_dsack()
2422 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); in tcp_try_undo_dsack()
2429 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) in tcp_try_undo_loss() argument
2431 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss()
2434 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_loss()
2436 DBGUNDO(sk, "partial loss"); in tcp_try_undo_loss()
2437 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); in tcp_try_undo_loss()
2439 NET_INC_STATS_BH(sock_net(sk), in tcp_try_undo_loss()
2441 inet_csk(sk)->icsk_retransmits = 0; in tcp_try_undo_loss()
2443 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_try_undo_loss()
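
The tcp_try_undo_*() helpers above share one undo step: when D-SACKs or timestamps prove a reduction was spurious, cwnd is re-opened and the saved ssthresh restored. A sketch of the generic fallback (a congestion module can override it via undo_cwnd(), as line 2361 shows); parameter names are illustrative:

static void undo_cwnd_reduction(unsigned int *snd_cwnd,
                                unsigned int *snd_ssthresh,
                                unsigned int prior_ssthresh)
{
        unsigned int reopened = *snd_ssthresh * 2;      /* generic undo fallback */

        if (*snd_cwnd < reopened)
                *snd_cwnd = reopened;
        if (prior_ssthresh > *snd_ssthresh)
                *snd_ssthresh = prior_ssthresh;         /* restore saved ssthresh */
}
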
2458 static void tcp_init_cwnd_reduction(struct sock *sk) in tcp_init_cwnd_reduction() argument
2460 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction()
2468 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2472 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, in tcp_cwnd_reduction() argument
2475 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction()
2501 static inline void tcp_end_cwnd_reduction(struct sock *sk) in tcp_end_cwnd_reduction() argument
2503 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction()
2506 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || in tcp_end_cwnd_reduction()
2511 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); in tcp_end_cwnd_reduction()
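
tcp_init_cwnd_reduction() and tcp_cwnd_reduction() above implement Proportional Rate Reduction (RFC 6937): while the flight is still above ssthresh, data is released in proportion to what the receiver reports delivered; once below ssthresh the sender slow-starts back up to it. A sketch in packet units, with names following the RFC rather than the kernel fields:

/* Returns the new cwnd (in packets) after an ACK during cwnd reduction. */
static unsigned int prr_cwnd(unsigned int pipe,          /* packets in flight     */
                             unsigned int ssthresh,
                             unsigned int prr_delivered, /* delivered since start */
                             unsigned int prr_out,       /* sent since start      */
                             unsigned int recover_fs,    /* flight size at start  */
                             unsigned int newly_acked)
{
        unsigned int sndcnt;

        if (pipe > ssthresh) {
                /* Proportional phase: pace out ssthresh/recover_fs of delivery. */
                sndcnt = (prr_delivered * ssthresh + recover_fs - 1) / recover_fs;
                sndcnt = sndcnt > prr_out ? sndcnt - prr_out : 0;
        } else {
                /* Slow-start back toward ssthresh, but never beyond it. */
                unsigned int limit = prr_delivered > prr_out ?
                                     prr_delivered - prr_out : 0;

                if (limit < newly_acked)
                        limit = newly_acked;
                limit += 1;
                sndcnt = limit < ssthresh - pipe ? limit : ssthresh - pipe;
        }
        return pipe + sndcnt;
}
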
2515 void tcp_enter_cwr(struct sock *sk) in tcp_enter_cwr() argument
2517 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr()
2520 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { in tcp_enter_cwr()
2522 tcp_init_cwnd_reduction(sk); in tcp_enter_cwr()
2523 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_enter_cwr()
2528 static void tcp_try_keep_open(struct sock *sk) in tcp_try_keep_open() argument
2530 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open()
2533 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2536 if (inet_csk(sk)->icsk_ca_state != state) { in tcp_try_keep_open()
2537 tcp_set_ca_state(sk, state); in tcp_try_keep_open()
2542 static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) in tcp_try_to_open() argument
2544 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open()
2548 if (!tcp_any_retrans_done(sk)) in tcp_try_to_open()
2552 tcp_enter_cwr(sk); in tcp_try_to_open()
2554 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { in tcp_try_to_open()
2555 tcp_try_keep_open(sk); in tcp_try_to_open()
2557 tcp_cwnd_reduction(sk, prior_unsacked, 0, flag); in tcp_try_to_open()
2561 static void tcp_mtup_probe_failed(struct sock *sk) in tcp_mtup_probe_failed() argument
2563 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed()
2567 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); in tcp_mtup_probe_failed()
2570 static void tcp_mtup_probe_success(struct sock *sk) in tcp_mtup_probe_success() argument
2572 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success()
2573 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success()
2576 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2578 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2582 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2586 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
2587 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); in tcp_mtup_probe_success()
2594 void tcp_simple_retransmit(struct sock *sk) in tcp_simple_retransmit() argument
2596 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit()
2597 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit()
2599 unsigned int mss = tcp_current_mss(sk); in tcp_simple_retransmit()
2602 tcp_for_write_queue(skb, sk) { in tcp_simple_retransmit()
2603 if (skb == tcp_send_head(sk)) in tcp_simple_retransmit()
2632 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2635 tcp_set_ca_state(sk, TCP_CA_Loss); in tcp_simple_retransmit()
2637 tcp_xmit_retransmit_queue(sk); in tcp_simple_retransmit()
2641 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) in tcp_enter_recovery() argument
2643 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery()
2651 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_enter_recovery()
2656 if (!tcp_in_cwnd_reduction(sk)) { in tcp_enter_recovery()
2658 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2659 tcp_init_cwnd_reduction(sk); in tcp_enter_recovery()
2661 tcp_set_ca_state(sk, TCP_CA_Recovery); in tcp_enter_recovery()
2667 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) in tcp_process_loss() argument
2669 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss()
2673 tcp_try_undo_loss(sk, false)) in tcp_process_loss()
2681 tcp_try_undo_loss(sk, true)) in tcp_process_loss()
2689 __tcp_push_pending_frames(sk, tcp_current_mss(sk), in tcp_process_loss()
2699 tcp_try_undo_recovery(sk); in tcp_process_loss()
2707 tcp_add_reno_sack(sk); in tcp_process_loss()
2711 tcp_xmit_retransmit_queue(sk); in tcp_process_loss()
2715 static bool tcp_try_undo_partial(struct sock *sk, const int acked, in tcp_try_undo_partial() argument
2718 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial()
2724 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); in tcp_try_undo_partial()
2732 tcp_cwnd_reduction(sk, prior_unsacked, 0, flag); in tcp_try_undo_partial()
2736 if (!tcp_any_retrans_done(sk)) in tcp_try_undo_partial()
2739 DBGUNDO(sk, "partial recovery"); in tcp_try_undo_partial()
2740 tcp_undo_cwnd_reduction(sk, true); in tcp_try_undo_partial()
2741 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); in tcp_try_undo_partial()
2742 tcp_try_keep_open(sk); in tcp_try_undo_partial()
2759 static void tcp_fastretrans_alert(struct sock *sk, const int acked, in tcp_fastretrans_alert() argument
2763 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert()
2764 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert()
2780 if (tcp_check_sack_reneging(sk, flag)) in tcp_fastretrans_alert()
2797 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2798 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_fastretrans_alert()
2805 if (tcp_try_undo_recovery(sk)) in tcp_fastretrans_alert()
2807 tcp_end_cwnd_reduction(sk); in tcp_fastretrans_alert()
2814 tcp_rack_mark_lost(sk)) in tcp_fastretrans_alert()
2822 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2824 if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag)) in tcp_fastretrans_alert()
2830 if (tcp_try_undo_dsack(sk)) { in tcp_fastretrans_alert()
2831 tcp_try_keep_open(sk); in tcp_fastretrans_alert()
2836 tcp_process_loss(sk, flag, is_dupack); in tcp_fastretrans_alert()
2846 tcp_add_reno_sack(sk); in tcp_fastretrans_alert()
2850 tcp_try_undo_dsack(sk); in tcp_fastretrans_alert()
2852 if (!tcp_time_to_recover(sk, flag)) { in tcp_fastretrans_alert()
2853 tcp_try_to_open(sk, flag, prior_unsacked); in tcp_fastretrans_alert()
2861 tcp_mtup_probe_failed(sk); in tcp_fastretrans_alert()
2864 tcp_simple_retransmit(sk); in tcp_fastretrans_alert()
2869 tcp_enter_recovery(sk, (flag & FLAG_ECE)); in tcp_fastretrans_alert()
2874 tcp_update_scoreboard(sk, fast_rexmit); in tcp_fastretrans_alert()
2875 tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag); in tcp_fastretrans_alert()
2876 tcp_xmit_retransmit_queue(sk); in tcp_fastretrans_alert()
2897 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) in tcp_update_rtt_min() argument
2900 struct rtt_meas *m = tcp_sk(sk)->rtt_min; in tcp_update_rtt_min()
2939 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, in tcp_ack_update_rtt() argument
2943 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt()
2970 tcp_update_rtt_min(sk, ca_rtt_us); in tcp_ack_update_rtt()
2971 tcp_rtt_estimator(sk, seq_rtt_us); in tcp_ack_update_rtt()
2972 tcp_set_rto(sk); in tcp_ack_update_rtt()
2975 inet_csk(sk)->icsk_backoff = 0; in tcp_ack_update_rtt()
2980 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) in tcp_synack_rtt_meas() argument
2991 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); in tcp_synack_rtt_meas()
2995 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) in tcp_cong_avoid() argument
2997 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid()
2999 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3000 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; in tcp_cong_avoid()
3006 void tcp_rearm_rto(struct sock *sk) in tcp_rearm_rto() argument
3008 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto()
3009 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto()
3018 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); in tcp_rearm_rto()
3020 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rearm_rto()
3024 struct sk_buff *skb = tcp_write_queue_head(sk); in tcp_rearm_rto()
3034 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, in tcp_rearm_rto()
3042 void tcp_resume_early_retransmit(struct sock *sk) in tcp_resume_early_retransmit() argument
3044 struct tcp_sock *tp = tcp_sk(sk); in tcp_resume_early_retransmit()
3046 tcp_rearm_rto(sk); in tcp_resume_early_retransmit()
3052 tcp_enter_recovery(sk, false); in tcp_resume_early_retransmit()
3053 tcp_update_scoreboard(sk, 1); in tcp_resume_early_retransmit()
3054 tcp_xmit_retransmit_queue(sk); in tcp_resume_early_retransmit()
3058 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) in tcp_tso_acked() argument
3060 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked()
3066 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3078 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, in tcp_ack_tstamp() argument
3084 if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) in tcp_ack_tstamp()
3089 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) in tcp_ack_tstamp()
3090 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); in tcp_ack_tstamp()
3097 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, in tcp_clean_rtx_queue() argument
3101 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3103 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue()
3117 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { in tcp_clean_rtx_queue()
3122 tcp_ack_tstamp(sk, skb, prior_snd_una); in tcp_clean_rtx_queue()
3130 acked_pcount = tcp_tso_acked(sk, skb); in tcp_clean_rtx_queue()
3183 tcp_unlink_write_queue(skb, sk); in tcp_clean_rtx_queue()
3184 sk_wmem_free_skb(sk, skb); in tcp_clean_rtx_queue()
3207 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, in tcp_clean_rtx_queue()
3211 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3214 tcp_mtup_probe_success(sk); in tcp_clean_rtx_queue()
3218 tcp_remove_reno_sacks(sk, pkts_acked); in tcp_clean_rtx_queue()
3224 tcp_update_reordering(sk, tp->fackets_out - reord, 0); in tcp_clean_rtx_queue()
3239 tcp_rearm_rto(sk); in tcp_clean_rtx_queue()
3243 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us); in tcp_clean_rtx_queue()
3250 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3271 static void tcp_ack_probe(struct sock *sk) in tcp_ack_probe() argument
3273 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe()
3274 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe()
3278 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3280 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); in tcp_ack_probe()
3285 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); in tcp_ack_probe()
3287 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_ack_probe()
3292 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) in tcp_ack_is_dubious() argument
3295 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; in tcp_ack_is_dubious()
3299 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) in tcp_may_raise_cwnd() argument
3301 if (tcp_in_cwnd_reduction(sk)) in tcp_may_raise_cwnd()
3310 if (tcp_sk(sk)->reordering > sysctl_tcp_reordering) in tcp_may_raise_cwnd()
3355 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, in tcp_ack_update_window() argument
3358 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window()
3376 tcp_fast_path_check(sk); in tcp_ack_update_window()
3378 if (tcp_send_head(sk)) in tcp_ack_update_window()
3379 tcp_slow_start_after_idle_check(sk); in tcp_ack_update_window()
3383 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); in tcp_ack_update_window()
3424 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) in tcp_send_challenge_ack() argument
3429 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack()
3433 if (tcp_oow_rate_limited(sock_net(sk), skb, in tcp_send_challenge_ack()
3445 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); in tcp_send_challenge_ack()
3446 tcp_send_ack(sk); in tcp_send_challenge_ack()
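
tcp_send_challenge_ack() above rate-limits the challenge ACKs required by RFC 5961 so blind in-window RST/SYN segments cannot be amplified into an ACK flood. A sketch of a simple per-second budget; the limit constant is an assumed stand-in for the tcp_challenge_ack_limit sysctl and the sketch ignores the kernel's randomisation:

#include <time.h>

static int challenge_ack_allowed(void)
{
        static time_t window_start;
        static unsigned int sent_in_window;
        const unsigned int limit = 1000;        /* assumed sysctl value  */
        time_t now = time(NULL);

        if (now != window_start) {              /* new one-second window */
                window_start = now;
                sent_in_window = 0;
        }
        return sent_in_window++ < limit;
}
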
3476 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) in tcp_process_tlp_ack() argument
3478 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack()
3490 tcp_init_cwnd_reduction(sk); in tcp_process_tlp_ack()
3491 tcp_set_ca_state(sk, TCP_CA_CWR); in tcp_process_tlp_ack()
3492 tcp_end_cwnd_reduction(sk); in tcp_process_tlp_ack()
3493 tcp_try_keep_open(sk); in tcp_process_tlp_ack()
3494 NET_INC_STATS_BH(sock_net(sk), in tcp_process_tlp_ack()
3503 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) in tcp_in_ack_event() argument
3505 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event()
3508 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3512 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3514 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack()
3515 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack()
3529 prefetchw(sk->sk_write_queue.next); in tcp_ack()
3537 tcp_send_challenge_ack(sk, skb); in tcp_ack()
3551 tcp_rearm_rto(sk); in tcp_ack()
3575 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); in tcp_ack()
3577 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); in tcp_ack()
3584 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); in tcp_ack()
3586 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3589 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3600 tcp_in_ack_event(sk, ack_ev_flags); in tcp_ack()
3606 sk->sk_err_soft = 0; in tcp_ack()
3614 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, in tcp_ack()
3618 if (tcp_ack_is_dubious(sk, flag)) { in tcp_ack()
3620 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3624 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3627 if (tcp_may_raise_cwnd(sk, flag)) in tcp_ack()
3628 tcp_cong_avoid(sk, ack, acked); in tcp_ack()
3631 struct dst_entry *dst = __sk_dst_get(sk); in tcp_ack()
3637 tcp_schedule_loss_probe(sk); in tcp_ack()
3638 tcp_update_pacing_rate(sk); in tcp_ack()
3644 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3650 if (tcp_send_head(sk)) in tcp_ack()
3651 tcp_ack_probe(sk); in tcp_ack()
3654 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3658 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3666 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3668 tcp_fastretrans_alert(sk, acked, prior_unsacked, in tcp_ack()
3672 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3913 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) in tcp_disordered_ack() argument
3915 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack()
3930 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
3933 static inline bool tcp_paws_discard(const struct sock *sk, in tcp_paws_discard() argument
3936 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard()
3939 !tcp_disordered_ack(sk, skb); in tcp_paws_discard()
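
tcp_paws_discard() above applies the PAWS test from RFC 7323: a segment carrying a timestamp older than the last one accepted is dropped, unless ts_recent is itself so old (roughly 24 days) that the 32-bit timestamp clock may have wrapped. A wraparound-safe sketch; the helper name is illustrative:

#include <stdint.h>

static int paws_reject(uint32_t ts_recent, uint32_t rcv_tsval,
                       long secs_since_ts_recent_update)
{
        if ((int32_t)(ts_recent - rcv_tsval) <= 0)
                return 0;                       /* timestamp did not go backwards */
        if (secs_since_ts_recent_update > 24L * 24 * 60 * 60)
                return 0;                       /* ts_recent stale: accept anyway */
        return 1;                               /* PAWS reject                    */
}
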
3962 void tcp_reset(struct sock *sk) in tcp_reset() argument
3965 switch (sk->sk_state) { in tcp_reset()
3967 sk->sk_err = ECONNREFUSED; in tcp_reset()
3970 sk->sk_err = EPIPE; in tcp_reset()
3975 sk->sk_err = ECONNRESET; in tcp_reset()
3980 if (!sock_flag(sk, SOCK_DEAD)) in tcp_reset()
3981 sk->sk_error_report(sk); in tcp_reset()
3983 tcp_done(sk); in tcp_reset()
4000 static void tcp_fin(struct sock *sk) in tcp_fin() argument
4002 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin()
4004 inet_csk_schedule_ack(sk); in tcp_fin()
4006 sk->sk_shutdown |= RCV_SHUTDOWN; in tcp_fin()
4007 sock_set_flag(sk, SOCK_DONE); in tcp_fin()
4009 switch (sk->sk_state) { in tcp_fin()
4013 tcp_set_state(sk, TCP_CLOSE_WAIT); in tcp_fin()
4014 inet_csk(sk)->icsk_ack.pingpong = 1; in tcp_fin()
4032 tcp_send_ack(sk); in tcp_fin()
4033 tcp_set_state(sk, TCP_CLOSING); in tcp_fin()
4037 tcp_send_ack(sk); in tcp_fin()
4038 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_fin()
4045 __func__, sk->sk_state); in tcp_fin()
4055 sk_mem_reclaim(sk); in tcp_fin()
4057 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_fin()
4058 sk->sk_state_change(sk); in tcp_fin()
4061 if (sk->sk_shutdown == SHUTDOWN_MASK || in tcp_fin()
4062 sk->sk_state == TCP_CLOSE) in tcp_fin()
4063 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); in tcp_fin()
4065 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in tcp_fin()
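
tcp_fin() above drives the receive-side half of the RFC 793 close handshake; the transitions visible in the listing are ESTABLISHED to CLOSE_WAIT, FIN_WAIT1 to CLOSING (simultaneous close), and FIN_WAIT2 to TIME_WAIT. A compact sketch of that mapping (ACK-of-our-FIN handling elided):

enum tcp_st { ST_ESTABLISHED, ST_FIN_WAIT1, ST_FIN_WAIT2,
              ST_CLOSE_WAIT, ST_CLOSING, ST_TIME_WAIT };

/* State after receiving an in-order FIN. */
static enum tcp_st state_after_fin(enum tcp_st s)
{
        switch (s) {
        case ST_ESTABLISHED:    return ST_CLOSE_WAIT;   /* peer closed first     */
        case ST_FIN_WAIT1:      return ST_CLOSING;      /* simultaneous close    */
        case ST_FIN_WAIT2:      return ST_TIME_WAIT;    /* our side already done */
        default:                return s;               /* unchanged here        */
        }
}
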
4082 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_set() argument
4084 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set()
4094 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_dsack_set()
4102 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) in tcp_dsack_extend() argument
4104 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend()
4107 tcp_dsack_set(sk, seq, end_seq); in tcp_dsack_extend()
4112 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) in tcp_send_dupack() argument
4114 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack()
4118 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_send_dupack()
4119 tcp_enter_quickack_mode(sk); in tcp_send_dupack()
4126 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); in tcp_send_dupack()
4130 tcp_send_ack(sk); in tcp_send_dupack()
4161 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) in tcp_sack_new_ofo_skb() argument
4163 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb()
4250 static bool tcp_try_coalesce(struct sock *sk, in tcp_try_coalesce() argument
4266 atomic_add(delta, &sk->sk_rmem_alloc); in tcp_try_coalesce()
4267 sk_mem_charge(sk, delta); in tcp_try_coalesce()
4268 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); in tcp_try_coalesce()
4278 static void tcp_ofo_queue(struct sock *sk) in tcp_ofo_queue() argument
4280 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue()
4293 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); in tcp_ofo_queue()
4298 SOCK_DEBUG(sk, "ofo packet was already received\n"); in tcp_ofo_queue()
4302 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", in tcp_ofo_queue()
4306 tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_ofo_queue()
4307 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); in tcp_ofo_queue()
4310 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_ofo_queue()
4312 tcp_fin(sk); in tcp_ofo_queue()
4318 static bool tcp_prune_ofo_queue(struct sock *sk);
4319 static int tcp_prune_queue(struct sock *sk);
4321 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, in tcp_try_rmem_schedule() argument
4324 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || in tcp_try_rmem_schedule()
4325 !sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4327 if (tcp_prune_queue(sk) < 0) in tcp_try_rmem_schedule()
4330 if (!sk_rmem_schedule(sk, skb, size)) { in tcp_try_rmem_schedule()
4331 if (!tcp_prune_ofo_queue(sk)) in tcp_try_rmem_schedule()
4334 if (!sk_rmem_schedule(sk, skb, size)) in tcp_try_rmem_schedule()
4341 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) in tcp_data_queue_ofo() argument
4343 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo()
4349 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { in tcp_data_queue_ofo()
4350 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); in tcp_data_queue_ofo()
4357 inet_csk_schedule_ack(sk); in tcp_data_queue_ofo()
4359 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); in tcp_data_queue_ofo()
4360 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", in tcp_data_queue_ofo()
4382 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { in tcp_data_queue_ofo()
4385 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4414 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4417 tcp_dsack_set(sk, seq, end_seq); in tcp_data_queue_ofo()
4422 tcp_dsack_set(sk, seq, in tcp_data_queue_ofo()
4446 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4451 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, in tcp_data_queue_ofo()
4453 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); in tcp_data_queue_ofo()
4459 tcp_sack_new_ofo_skb(sk, seq, end_seq); in tcp_data_queue_ofo()
4462 tcp_grow_window(sk, skb); in tcp_data_queue_ofo()
4463 skb_set_owner_r(skb, sk); in tcp_data_queue_ofo()
4467 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, in tcp_queue_rcv() argument
4471 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); in tcp_queue_rcv()
4475 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; in tcp_queue_rcv()
4476 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); in tcp_queue_rcv()
4478 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_queue_rcv()
4479 skb_set_owner_r(skb, sk); in tcp_queue_rcv()
4484 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) in tcp_send_rcvq() argument
4502 &err, sk->sk_allocation); in tcp_send_rcvq()
4510 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_send_rcvq()
4517 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; in tcp_send_rcvq()
4519 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; in tcp_send_rcvq()
4521 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { in tcp_send_rcvq()
4534 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) in tcp_data_queue() argument
4536 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue()
4561 sock_owned_by_user(sk) && !tp->urg_data) { in tcp_data_queue()
4572 tcp_rcv_space_adjust(sk); in tcp_data_queue()
4580 if (skb_queue_len(&sk->sk_receive_queue) == 0) in tcp_data_queue()
4581 sk_forced_mem_schedule(sk, skb->truesize); in tcp_data_queue()
4582 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) in tcp_data_queue()
4585 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); in tcp_data_queue()
4589 tcp_event_data_recv(sk, skb); in tcp_data_queue()
4591 tcp_fin(sk); in tcp_data_queue()
4594 tcp_ofo_queue(sk); in tcp_data_queue()
4600 inet_csk(sk)->icsk_ack.pingpong = 0; in tcp_data_queue()
4606 tcp_fast_path_check(sk); in tcp_data_queue()
4610 if (!sock_flag(sk, SOCK_DEAD)) in tcp_data_queue()
4611 sk->sk_data_ready(sk); in tcp_data_queue()
4617 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); in tcp_data_queue()
4618 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4621 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4622 inet_csk_schedule_ack(sk); in tcp_data_queue()
4632 tcp_enter_quickack_mode(sk); in tcp_data_queue()
4636 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", in tcp_data_queue()
4640 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4650 tcp_data_queue_ofo(sk, skb); in tcp_data_queue()
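
tcp_data_queue() above splits incoming data into the cases the surrounding listing handles: in-sequence data goes straight onto sk_receive_queue (possibly draining the out-of-order queue), data entirely below rcv_nxt only earns a D-SACK and a duplicate ACK, data beyond rcv_nxt is parked on the out-of-order queue, and a segment straddling rcv_nxt is trimmed first. A wraparound-safe sketch of that classification; names are illustrative:

#include <stdint.h>

enum rx_action { RX_IN_ORDER, RX_DUPLICATE, RX_OUT_OF_ORDER, RX_PARTIAL };

static enum rx_action classify_segment(uint32_t seq, uint32_t end_seq,
                                       uint32_t rcv_nxt)
{
        if (seq == rcv_nxt)
                return RX_IN_ORDER;             /* queue, maybe drain ofo queue */
        if ((int32_t)(end_seq - rcv_nxt) <= 0)
                return RX_DUPLICATE;            /* all old: D-SACK + dup ACK    */
        if ((int32_t)(seq - rcv_nxt) > 0)
                return RX_OUT_OF_ORDER;         /* hole before it: ofo queue    */
        return RX_PARTIAL;                      /* overlaps rcv_nxt: trim first */
}
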
4653 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, in tcp_collapse_one() argument
4663 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); in tcp_collapse_one()
4677 tcp_collapse(struct sock *sk, struct sk_buff_head *list, in tcp_collapse() argument
4694 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4739 skb_set_owner_r(nskb, sk); in tcp_collapse()
4756 skb = tcp_collapse_one(sk, skb, list); in tcp_collapse()
4769 static void tcp_collapse_ofo_queue(struct sock *sk) in tcp_collapse_ofo_queue() argument
4771 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue()
4795 tcp_collapse(sk, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
4816 static bool tcp_prune_ofo_queue(struct sock *sk) in tcp_prune_ofo_queue() argument
4818 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue()
4822 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); in tcp_prune_ofo_queue()
4832 sk_mem_reclaim(sk); in tcp_prune_ofo_queue()
4845 static int tcp_prune_queue(struct sock *sk) in tcp_prune_queue() argument
4847 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue()
4849 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
4851 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); in tcp_prune_queue()
4853 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in tcp_prune_queue()
4854 tcp_clamp_window(sk); in tcp_prune_queue()
4855 else if (tcp_under_memory_pressure(sk)) in tcp_prune_queue()
4858 tcp_collapse_ofo_queue(sk); in tcp_prune_queue()
4859 if (!skb_queue_empty(&sk->sk_receive_queue)) in tcp_prune_queue()
4860 tcp_collapse(sk, &sk->sk_receive_queue, in tcp_prune_queue()
4861 skb_peek(&sk->sk_receive_queue), in tcp_prune_queue()
4864 sk_mem_reclaim(sk); in tcp_prune_queue()
4866 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4872 tcp_prune_ofo_queue(sk); in tcp_prune_queue()
4874 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) in tcp_prune_queue()
4881 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); in tcp_prune_queue()
4888 static bool tcp_should_expand_sndbuf(const struct sock *sk) in tcp_should_expand_sndbuf() argument
4890 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf()
4895 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in tcp_should_expand_sndbuf()
4899 if (tcp_under_memory_pressure(sk)) in tcp_should_expand_sndbuf()
4903 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) in tcp_should_expand_sndbuf()
4919 static void tcp_new_space(struct sock *sk) in tcp_new_space() argument
4921 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space()
4923 if (tcp_should_expand_sndbuf(sk)) { in tcp_new_space()
4924 tcp_sndbuf_expand(sk); in tcp_new_space()
4928 sk->sk_write_space(sk); in tcp_new_space()
4931 static void tcp_check_space(struct sock *sk) in tcp_check_space() argument
4933 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { in tcp_check_space()
4934 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_check_space()
4937 if (sk->sk_socket && in tcp_check_space()
4938 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) in tcp_check_space()
4939 tcp_new_space(sk); in tcp_check_space()
4943 static inline void tcp_data_snd_check(struct sock *sk) in tcp_data_snd_check() argument
4945 tcp_push_pending_frames(sk); in tcp_data_snd_check()
4946 tcp_check_space(sk); in tcp_data_snd_check()
4952 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) in __tcp_ack_snd_check() argument
4954 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check()
4957 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
4961 __tcp_select_window(sk) >= tp->rcv_wnd) || in __tcp_ack_snd_check()
4963 tcp_in_quickack_mode(sk) || in __tcp_ack_snd_check()
4967 tcp_send_ack(sk); in __tcp_ack_snd_check()
4970 tcp_send_delayed_ack(sk); in __tcp_ack_snd_check()
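
__tcp_ack_snd_check() above decides between an immediate and a delayed ACK: acknowledge at once when more than one full segment is unacknowledged (and the advertised window can also be advanced), when the socket is in quick-ACK mode, or when out-of-order data is queued; otherwise arm the delayed-ACK timer. A condensed sketch, with the window-advance test folded into one flag and illustrative parameter names:

static int ack_immediately(unsigned int unacked_bytes, unsigned int rcv_mss,
                           int window_can_advance, int in_quickack_mode,
                           int have_ooo_data)
{
        if (unacked_bytes > rcv_mss && window_can_advance)
                return 1;               /* at least two full segments pending */
        if (in_quickack_mode || have_ooo_data)
                return 1;
        return 0;                       /* fall back to the delayed-ACK timer */
}
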
4974 static inline void tcp_ack_snd_check(struct sock *sk) in tcp_ack_snd_check() argument
4976 if (!inet_csk_ack_scheduled(sk)) { in tcp_ack_snd_check()
4980 __tcp_ack_snd_check(sk, 1); in tcp_ack_snd_check()
4993 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) in tcp_check_urg() argument
4995 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg()
5024 sk_send_sigurg(sk); in tcp_check_urg()
5042 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5043 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_check_urg()
5046 __skb_unlink(skb, &sk->sk_receive_queue); in tcp_check_urg()
5059 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) in tcp_urg() argument
5061 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg()
5065 tcp_check_urg(sk, th); in tcp_urg()
5078 if (!sock_flag(sk, SOCK_DEAD)) in tcp_urg()
5079 sk->sk_data_ready(sk); in tcp_urg()
5084 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) in tcp_copy_to_iovec() argument
5086 struct tcp_sock *tp = tcp_sk(sk); in tcp_copy_to_iovec()
5099 tcp_rcv_space_adjust(sk); in tcp_copy_to_iovec()
5106 static __sum16 __tcp_checksum_complete_user(struct sock *sk, in __tcp_checksum_complete_user() argument
5111 if (sock_owned_by_user(sk)) { in __tcp_checksum_complete_user()
5121 static inline bool tcp_checksum_complete_user(struct sock *sk, in tcp_checksum_complete_user() argument
5125 __tcp_checksum_complete_user(sk, skb); in tcp_checksum_complete_user()
5131 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, in tcp_validate_incoming() argument
5134 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming()
5138 tcp_paws_discard(sk, skb)) { in tcp_validate_incoming()
5140 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_validate_incoming()
5141 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5144 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5161 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_validate_incoming()
5164 tcp_send_dupack(sk, skb); in tcp_validate_incoming()
5178 tcp_reset(sk); in tcp_validate_incoming()
5180 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5192 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_validate_incoming()
5193 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); in tcp_validate_incoming()
5194 tcp_send_challenge_ack(sk, skb); in tcp_validate_incoming()
5228 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, in tcp_rcv_established() argument
5231 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established()
5233 if (unlikely(!sk->sk_rx_dst)) in tcp_rcv_established()
5234 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_rcv_established()
5303 tcp_ack(sk, skb, 0); in tcp_rcv_established()
5305 tcp_data_snd_check(sk); in tcp_rcv_established()
5308 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5318 sock_owned_by_user(sk)) { in tcp_rcv_established()
5321 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { in tcp_rcv_established()
5332 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5336 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); in tcp_rcv_established()
5341 if (tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5344 if ((int)skb->truesize > sk->sk_forward_alloc) in tcp_rcv_established()
5356 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5358 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); in tcp_rcv_established()
5361 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, in tcp_rcv_established()
5365 tcp_event_data_recv(sk, skb); in tcp_rcv_established()
5369 tcp_ack(sk, skb, FLAG_DATA); in tcp_rcv_established()
5370 tcp_data_snd_check(sk); in tcp_rcv_established()
5371 if (!inet_csk_ack_scheduled(sk)) in tcp_rcv_established()
5375 __tcp_ack_snd_check(sk, 0); in tcp_rcv_established()
5379 sk->sk_data_ready(sk); in tcp_rcv_established()
5385 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) in tcp_rcv_established()
5395 if (!tcp_validate_incoming(sk, skb, th, 1)) in tcp_rcv_established()
5399 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) in tcp_rcv_established()
5402 tcp_rcv_rtt_measure_ts(sk, skb); in tcp_rcv_established()
5405 tcp_urg(sk, skb, th); in tcp_rcv_established()
5408 tcp_data_queue(sk, skb); in tcp_rcv_established()
5410 tcp_data_snd_check(sk); in tcp_rcv_established()
5411 tcp_ack_snd_check(sk); in tcp_rcv_established()
5415 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_rcv_established()
5416 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_rcv_established()
5423 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) in tcp_finish_connect() argument
5425 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect()
5426 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect()
5428 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_finish_connect()
5431 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5432 security_inet_conn_established(sk, skb); in tcp_finish_connect()
5436 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5438 tcp_init_metrics(sk); in tcp_finish_connect()
5440 tcp_init_congestion_control(sk); in tcp_finish_connect()
5447 tcp_init_buffer_space(sk); in tcp_finish_connect()
5449 if (sock_flag(sk, SOCK_KEEPOPEN)) in tcp_finish_connect()
5450 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5457 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_finish_connect()
5458 sk->sk_state_change(sk); in tcp_finish_connect()
5459 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_finish_connect()
5463 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, in tcp_rcv_fastopen_synack() argument
5466 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack()
5467 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5499 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); in tcp_rcv_fastopen_synack()
5502 tcp_for_write_queue_from(data, sk) { in tcp_rcv_fastopen_synack()
5503 if (data == tcp_send_head(sk) || in tcp_rcv_fastopen_synack()
5504 __tcp_retransmit_skb(sk, data)) in tcp_rcv_fastopen_synack()
5507 tcp_rearm_rto(sk); in tcp_rcv_fastopen_synack()
5508 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL); in tcp_rcv_fastopen_synack()
5513 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); in tcp_rcv_fastopen_synack()
5517 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, in tcp_rcv_synsent_state_process() argument
5520 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process()
5521 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process()
5545 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); in tcp_rcv_synsent_state_process()
5558 tcp_reset(sk); in tcp_rcv_synsent_state_process()
5582 tcp_ack(sk, skb, FLAG_SLOWPATH); in tcp_rcv_synsent_state_process()
5613 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5614 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5615 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5624 tcp_finish_connect(sk, skb); in tcp_rcv_synsent_state_process()
5627 tcp_rcv_fastopen_synack(sk, skb, &foc)) in tcp_rcv_synsent_state_process()
5630 if (sk->sk_write_pending || in tcp_rcv_synsent_state_process()
5640 inet_csk_schedule_ack(sk); in tcp_rcv_synsent_state_process()
5642 tcp_enter_quickack_mode(sk); in tcp_rcv_synsent_state_process()
5643 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_rcv_synsent_state_process()
5650 tcp_send_ack(sk); in tcp_rcv_synsent_state_process()
5677 tcp_set_state(sk, TCP_SYN_RECV); in tcp_rcv_synsent_state_process()
5701 tcp_mtup_init(sk); in tcp_rcv_synsent_state_process()
5702 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5703 tcp_initialize_rcv_mss(sk); in tcp_rcv_synsent_state_process()
5705 tcp_send_synack(sk); in tcp_rcv_synsent_state_process()
5745 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) in tcp_rcv_state_process() argument
5747 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process()
5748 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process()
5756 switch (sk->sk_state) { in tcp_rcv_state_process()
5770 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5796 queued = tcp_rcv_synsent_state_process(sk, skb, th); in tcp_rcv_state_process()
5801 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5803 tcp_data_snd_check(sk); in tcp_rcv_state_process()
5809 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_rcv_state_process()
5810 sk->sk_state != TCP_FIN_WAIT1); in tcp_rcv_state_process()
5812 if (!tcp_check_req(sk, skb, req, true)) in tcp_rcv_state_process()
5819 if (!tcp_validate_incoming(sk, skb, th, 0)) in tcp_rcv_state_process()
5823 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | in tcp_rcv_state_process()
5826 switch (sk->sk_state) { in tcp_rcv_state_process()
5832 tcp_synack_rtt_meas(sk, req); in tcp_rcv_state_process()
5839 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5842 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()
5843 tcp_init_congestion_control(sk); in tcp_rcv_state_process()
5845 tcp_mtup_init(sk); in tcp_rcv_state_process()
5847 tcp_init_buffer_space(sk); in tcp_rcv_state_process()
5850 tcp_set_state(sk, TCP_ESTABLISHED); in tcp_rcv_state_process()
5851 sk->sk_state_change(sk); in tcp_rcv_state_process()
5857 if (sk->sk_socket) in tcp_rcv_state_process()
5858 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); in tcp_rcv_state_process()
5876 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5878 tcp_init_metrics(sk); in tcp_rcv_state_process()
5880 tcp_update_pacing_rate(sk); in tcp_rcv_state_process()
5885 tcp_initialize_rcv_mss(sk); in tcp_rcv_state_process()
5908 reqsk_fastopen_remove(sk, req, false); in tcp_rcv_state_process()
5909 tcp_rearm_rto(sk); in tcp_rcv_state_process()
5914 tcp_set_state(sk, TCP_FIN_WAIT2); in tcp_rcv_state_process()
5915 sk->sk_shutdown |= SEND_SHUTDOWN; in tcp_rcv_state_process()
5917 dst = __sk_dst_get(sk); in tcp_rcv_state_process()
5921 if (!sock_flag(sk, SOCK_DEAD)) { in tcp_rcv_state_process()
5923 sk->sk_state_change(sk); in tcp_rcv_state_process()
5930 tcp_done(sk); in tcp_rcv_state_process()
5931 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5935 tmo = tcp_fin_time(sk); in tcp_rcv_state_process()
5937 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); in tcp_rcv_state_process()
5938 } else if (th->fin || sock_owned_by_user(sk)) { in tcp_rcv_state_process()
5945 inet_csk_reset_keepalive_timer(sk, tmo); in tcp_rcv_state_process()
5947 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_rcv_state_process()
5955 tcp_time_wait(sk, TCP_TIME_WAIT, 0); in tcp_rcv_state_process()
5962 tcp_update_metrics(sk); in tcp_rcv_state_process()
5963 tcp_done(sk); in tcp_rcv_state_process()
5970 tcp_urg(sk, skb, th); in tcp_rcv_state_process()
5973 switch (sk->sk_state) { in tcp_rcv_state_process()
5985 if (sk->sk_shutdown & RCV_SHUTDOWN) { in tcp_rcv_state_process()
5988 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_rcv_state_process()
5989 tcp_reset(sk); in tcp_rcv_state_process()
5995 tcp_data_queue(sk, skb); in tcp_rcv_state_process()
6001 if (sk->sk_state != TCP_CLOSE) { in tcp_rcv_state_process()
6002 tcp_data_snd_check(sk); in tcp_rcv_state_process()
6003 tcp_ack_snd_check(sk); in tcp_rcv_state_process()
6065 struct sk_buff *skb, const struct sock *sk) in tcp_openreq_init() argument
6085 ireq->ir_mark = inet_request_mark(sk, skb); in tcp_openreq_init()
6113 static bool tcp_syn_flood_action(const struct sock *sk, in tcp_syn_flood_action() argument
6117 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_syn_flood_action()
6125 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); in tcp_syn_flood_action()
6128 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); in tcp_syn_flood_action()
6139 static void tcp_reqsk_record_syn(const struct sock *sk, in tcp_reqsk_record_syn() argument
6143 if (tcp_sk(sk)->save_syn) { in tcp_reqsk_record_syn()
6158 struct sock *sk, struct sk_buff *skb) in tcp_conn_request() argument
6163 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request()
6175 inet_csk_reqsk_queue_is_full(sk)) && !isn) { in tcp_conn_request()
6176 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); in tcp_conn_request()
6187 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { in tcp_conn_request()
6188 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_conn_request()
6192 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); in tcp_conn_request()
6207 tcp_openreq_init(req, &tmp_opt, skb, sk); in tcp_conn_request()
6210 inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; in tcp_conn_request()
6212 af_ops->init_req(req, sk, skb); in tcp_conn_request()
6214 if (security_inet_conn_request(sk, skb, req)) in tcp_conn_request()
6230 dst = af_ops->route_req(sk, &fl, req, &strict); in tcp_conn_request()
6235 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); in tcp_conn_request()
6241 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < in tcp_conn_request()
6260 dst = af_ops->route_req(sk, &fl, req, NULL); in tcp_conn_request()
6265 tcp_ecn_create_request(req, skb, sk, dst); in tcp_conn_request()
6268 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); in tcp_conn_request()
6276 tcp_openreq_init_rwin(req, sk, dst); in tcp_conn_request()
6278 tcp_reqsk_record_syn(sk, req, skb); in tcp_conn_request()
6279 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); in tcp_conn_request()
6285 inet_csk_reqsk_queue_add(sk, req, fastopen_sk); in tcp_conn_request()
6286 sk->sk_data_ready(sk); in tcp_conn_request()
6292 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); in tcp_conn_request()
6293 af_ops->send_synack(sk, dst, &fl, req, in tcp_conn_request()
6306 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_conn_request()
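
tcp_conn_request() above opens with the listener admission checks visible at lines 6175-6188: a full SYN (request) backlog switches to SYN cookies when they are enabled, otherwise the SYN is dropped, and a full accept queue with more than one young request is also grounds for dropping. A sketch of that decision; the helper name and flag parameters are illustrative:

/* Returns 0 to proceed, -1 to drop; *want_cookie selects stateless SYN cookies. */
static int syn_admission(int reqsk_queue_full, int syncookies_enabled,
                         int acceptq_full, int young_reqs, int *want_cookie)
{
        *want_cookie = 0;
        if (reqsk_queue_full) {
                if (!syncookies_enabled)
                        return -1;      /* no room and no cookies: drop       */
                *want_cookie = 1;       /* answer without keeping state       */
        }
        if (acceptq_full && young_reqs > 1)
                return -1;              /* application is not accepting       */
        return 0;                       /* go on to allocate a request sock   */
}
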