Lines matching refs: tp (identifier cross-reference; the functions listed are from the kernel's net/ipv4/tcp_input.c)
206 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr() argument
208 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
209 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
212 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_accept_cwr() argument
215 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; in tcp_ecn_accept_cwr()
218 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
220 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; in tcp_ecn_withdraw_cwr()
223 static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in __tcp_ecn_check_ce() argument
231 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
232 tcp_enter_quickack_mode((struct sock *)tp); in __tcp_ecn_check_ce()
235 if (tcp_ca_needs_ecn((struct sock *)tp)) in __tcp_ecn_check_ce()
236 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); in __tcp_ecn_check_ce()
238 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
240 tcp_enter_quickack_mode((struct sock *)tp); in __tcp_ecn_check_ce()
241 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
243 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
246 if (tcp_ca_needs_ecn((struct sock *)tp)) in __tcp_ecn_check_ce()
247 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); in __tcp_ecn_check_ce()
248 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
253 static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_ecn_check_ce() argument
255 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_check_ce()
256 __tcp_ecn_check_ce(tp, skb); in tcp_ecn_check_ce()
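The helpers above are the receive-side ECN flag machine: every path is gated on TCP_ECN_OK, a CE-marked segment sets TCP_ECN_DEMAND_CWR (keep echoing ECE) and forces quick ACKs, and tcp_ecn_accept_cwr() clears the demand once the peer's CWR arrives. A minimal standalone sketch of that flag logic, using illustrative bit values rather than the kernel's definitions:

#include <stdio.h>

#define ECN_OK         0x1  /* ECN negotiated for this connection */
#define ECN_QUEUE_CWR  0x2  /* set CWR on the next outgoing data segment */
#define ECN_DEMAND_CWR 0x4  /* keep echoing ECE until the peer sends CWR */
#define ECN_SEEN       0x8  /* at least one ECT/CE-marked segment seen */

static void check_ce(unsigned int *flags, int ip_ecn_field)
{
	if (!(*flags & ECN_OK))
		return;                   /* ECN not negotiated: ignore */
	if (ip_ecn_field == 3) {          /* CE: congestion experienced */
		*flags |= ECN_DEMAND_CWR; /* start echoing ECE */
		*flags |= ECN_SEEN;
	} else if (ip_ecn_field != 0) {   /* ECT(0) or ECT(1) */
		*flags |= ECN_SEEN;
	}
}

static void accept_cwr(unsigned int *flags)
{
	*flags &= ~ECN_DEMAND_CWR;        /* peer's CWR seen: stop echoing */
}

int main(void)
{
	unsigned int flags = ECN_OK;
	check_ce(&flags, 3);              /* a CE-marked segment arrives */
	printf("demand CWR: %d\n", !!(flags & ECN_DEMAND_CWR)); /* 1 */
	accept_cwr(&flags);
	printf("demand CWR: %d\n", !!(flags & ECN_DEMAND_CWR)); /* 0 */
	return 0;
}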
259 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack() argument
261 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) in tcp_ecn_rcv_synack()
262 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_synack()
265 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn() argument
267 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) in tcp_ecn_rcv_syn()
268 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_syn()
271 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo() argument
273 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_rcv_ecn_echo()
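tcp_ecn_rcv_synack() and tcp_ecn_rcv_syn() enforce the RFC 3168 handshake: ECN stays negotiated only when a SYN-ACK carries ECE without CWR, or a SYN carries both ECE and CWR; any other combination clears TCP_ECN_OK. A small sketch derived directly from the two conditions above:

#include <stdbool.h>
#include <stdio.h>

static bool ecn_ok_after_synack(bool ece, bool cwr)
{
	return ece && !cwr;   /* mirrors: clear ECN_OK if (!ece || cwr) */
}

static bool ecn_ok_after_syn(bool ece, bool cwr)
{
	return ece && cwr;    /* mirrors: clear ECN_OK if (!ece || !cwr) */
}

int main(void)
{
	printf("SYN-ACK ECE=1 CWR=0 -> keep ECN: %d\n",
	       ecn_ok_after_synack(true, false));   /* 1 */
	printf("SYN     ECE=1 CWR=0 -> keep ECN: %d\n",
	       ecn_ok_after_syn(true, false));      /* 0 */
	return 0;
}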
285 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
292 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
299 nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); in tcp_sndbuf_expand()
300 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
340 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
345 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
357 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
360 if (tp->rcv_ssthresh < tp->window_clamp && in tcp_grow_window()
361 (int)tp->rcv_ssthresh < tcp_space(sk) && in tcp_grow_window()
369 incr = 2 * tp->advmss; in tcp_grow_window()
375 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, in tcp_grow_window()
376 tp->window_clamp); in tcp_grow_window()
406 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
414 tp->rcvq_space.space = tp->rcv_wnd; in tcp_init_buffer_space()
415 tp->rcvq_space.time = tcp_time_stamp; in tcp_init_buffer_space()
416 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
420 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
421 tp->window_clamp = maxwin; in tcp_init_buffer_space()
423 if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
424 tp->window_clamp = max(maxwin - in tcp_init_buffer_space()
426 4 * tp->advmss); in tcp_init_buffer_space()
431 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
432 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
433 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); in tcp_init_buffer_space()
435 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
436 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_init_buffer_space()
442 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
455 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
467 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
468 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
470 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
489 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
491 u32 new_sample = tp->rcv_rtt_est.rtt; in tcp_rcv_rtt_update()
521 if (tp->rcv_rtt_est.rtt != new_sample) in tcp_rcv_rtt_update()
522 tp->rcv_rtt_est.rtt = new_sample; in tcp_rcv_rtt_update()
525 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
527 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
529 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
531 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); in tcp_rcv_rtt_measure()
534 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
535 tp->rcv_rtt_est.time = tcp_time_stamp; in tcp_rcv_rtt_measure()
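tcp_rcv_rtt_measure() estimates the RTT from the receiver side without timestamps: it arms a sequence target one receive window ahead and, when rcv_nxt passes it, takes the elapsed time as an RTT sample (which the kernel then filters through tcp_rcv_rtt_update()). A condensed sketch that stores the raw sample instead of filtering:

#include <stdint.h>
#include <stdio.h>

struct rcv_rtt_est { uint32_t rtt, seq, time; };

static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

static void rcv_rtt_measure(struct rcv_rtt_est *est, uint32_t rcv_nxt,
			    uint32_t rcv_wnd, uint32_t now)
{
	if (est->time != 0) {
		if (seq_before(rcv_nxt, est->seq))
			return;                 /* window not consumed yet */
		est->rtt = now - est->time;     /* raw one-window sample */
	}
	est->seq = rcv_nxt + rcv_wnd;           /* arm the next measurement */
	est->time = now;
}

int main(void)
{
	struct rcv_rtt_est est = {0, 0, 0};
	rcv_rtt_measure(&est, 1000, 64000, 100);   /* arm target at 65000 */
	rcv_rtt_measure(&est, 66000, 64000, 180);  /* sample: 80 ticks */
	printf("rtt sample = %u\n", est.rtt);
	return 0;
}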
541 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
542 if (tp->rx_opt.rcv_tsecr && in tcp_rcv_rtt_measure_ts()
545 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); in tcp_rcv_rtt_measure_ts()
554 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
558 time = tcp_time_stamp - tp->rcvq_space.time; in tcp_rcv_space_adjust()
559 if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) in tcp_rcv_space_adjust()
563 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
564 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
583 rcvwin = (copied << 1) + 16 * tp->advmss; in tcp_rcv_space_adjust()
591 tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) { in tcp_rcv_space_adjust()
593 tp->rcvq_space.space + (tp->rcvq_space.space >> 1)) in tcp_rcv_space_adjust()
599 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); in tcp_rcv_space_adjust()
600 while (tcp_win_from_space(rcvmem) < tp->advmss) in tcp_rcv_space_adjust()
603 rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); in tcp_rcv_space_adjust()
608 tp->window_clamp = rcvwin; in tcp_rcv_space_adjust()
611 tp->rcvq_space.space = copied; in tcp_rcv_space_adjust()
614 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
615 tp->rcvq_space.time = tcp_time_stamp; in tcp_rcv_space_adjust()
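tcp_rcv_space_adjust() is the receive-buffer autotuning (DRS) pass: once per RTT it compares the bytes the application consumed against rcvq_space.space, targets a window of twice that plus 16 MSS of slack, and converts the window into buffer bytes by scaling with per-segment truesize overhead. A back-of-envelope rework; the truesize factor and the sysctl_tcp_rmem[2] cap below are placeholder assumptions, not kernel values:

#include <stdio.h>

int main(void)
{
	unsigned int copied = 1000000;     /* bytes consumed in one RTT */
	unsigned int advmss = 1460;
	unsigned long rmem_max = 6291456;  /* assumed sysctl_tcp_rmem[2] */

	unsigned int rcvwin = (copied << 1) + 16 * advmss;
	/* crude stand-in for SKB_TRUESIZE(advmss + MAX_TCP_HEADER): */
	unsigned int rcvmem = 2 * (advmss + 128);
	unsigned long rcvbuf = (unsigned long)rcvwin / advmss * rcvmem;

	if (rcvbuf > rmem_max)
		rcvbuf = rmem_max;
	printf("rcvwin=%u bytes, rcvbuf=%lu bytes\n", rcvwin, rcvbuf);
	return 0;
}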
630 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
638 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
668 tcp_ecn_check_ce(tp, skb); in tcp_event_data_recv()
685 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
687 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
710 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
722 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
724 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
725 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
726 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
727 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
728 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
730 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
731 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
732 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
733 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
734 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
739 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
740 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
741 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
742 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
744 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
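tcp_rtt_estimator() is the Jacobson/Karels smoother in fixed point: tp->srtt_us holds srtt << 3 and tp->mdev_us holds mdev << 2, so adding the raw error term implements the 1/8 and 1/4 gains, and the first sample seeds mdev so the RTO comes out near 3 * RTT. A standalone sketch of just the two EWMAs (the mdev_max/rttvar windowing from the listing is omitted):

#include <stdint.h>
#include <stdio.h>

struct rtt { uint32_t srtt_us; uint32_t mdev_us; }; /* srtt<<3, mdev<<2 */

static void rtt_update(struct rtt *r, int32_t m /* new sample, usec */)
{
	if (r->srtt_us == 0) {                   /* first measurement */
		r->srtt_us = (uint32_t)m << 3;   /* srtt = m */
		r->mdev_us = (uint32_t)m << 1;   /* so that rto ~= 3*rtt */
		return;
	}
	m -= (int32_t)(r->srtt_us >> 3);    /* m is now the error term */
	r->srtt_us += (uint32_t)m;          /* srtt = 7/8 srtt + 1/8 new */
	if (m < 0)
		m = -m;                     /* use |error| for the deviation */
	m -= (int32_t)(r->mdev_us >> 2);
	r->mdev_us += (uint32_t)m;          /* mdev = 3/4 mdev + 1/4 |err| */
}

int main(void)
{
	struct rtt r = {0, 0};
	int32_t samples[] = {100000, 120000, 80000, 110000};
	for (int i = 0; i < 4; i++)
		rtt_update(&r, samples[i]);
	printf("srtt=%u us, rto~=%u us\n",
	       r.srtt_us >> 3, (r.srtt_us >> 3) + r.mdev_us);
	return 0;
}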
755 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
759 rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3); in tcp_update_pacing_rate()
761 rate *= max(tp->snd_cwnd, tp->packets_out); in tcp_update_pacing_rate()
763 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
764 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
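tcp_update_pacing_rate() sets the pacer to roughly twice the nominal sending rate: mss_cache * max(snd_cwnd, packets_out) per smoothed RTT, with USEC_PER_SEC shifted left by 3 so that it cancels the << 3 stored in srtt_us. Worked numbers under assumed values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mss = 1448, cwnd = 10;
	uint64_t srtt_us = 100000 << 3;        /* 100 ms, stored << 3 */
	uint64_t rate = mss * 2 * (1000000ULL << 3); /* USEC_PER_SEC << 3 */

	rate *= cwnd;                          /* max(snd_cwnd, packets_out) */
	rate /= srtt_us;                       /* the two << 3 cancel */
	printf("pacing rate = %llu bytes/s\n", (unsigned long long)rate);
	return 0;                              /* 289600: 2 * 10 pkts/100ms */
}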
779 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
790 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
804 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
810 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
817 void tcp_disable_fack(struct tcp_sock *tp) in tcp_disable_fack() argument
820 if (tcp_is_fack(tp)) in tcp_disable_fack()
821 tp->lost_skb_hint = NULL; in tcp_disable_fack()
822 tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED; in tcp_disable_fack()
826 static void tcp_dsack_seen(struct tcp_sock *tp) in tcp_dsack_seen() argument
828 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
834 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_reordering() local
835 if (metric > tp->reordering) { in tcp_update_reordering()
838 tp->reordering = min(sysctl_tcp_max_reordering, metric); in tcp_update_reordering()
843 else if (tcp_is_reno(tp)) in tcp_update_reordering()
845 else if (tcp_is_fack(tp)) in tcp_update_reordering()
853 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_update_reordering()
854 tp->reordering, in tcp_update_reordering()
855 tp->fackets_out, in tcp_update_reordering()
856 tp->sacked_out, in tcp_update_reordering()
857 tp->undo_marker ? tp->undo_retrans : 0); in tcp_update_reordering()
859 tcp_disable_fack(tp); in tcp_update_reordering()
863 tcp_disable_early_retrans(tp); in tcp_update_reordering()
867 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
869 if (!tp->retransmit_skb_hint || in tcp_verify_retransmit_hint()
871 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) in tcp_verify_retransmit_hint()
872 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
874 if (!tp->lost_out || in tcp_verify_retransmit_hint()
875 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) in tcp_verify_retransmit_hint()
876 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_verify_retransmit_hint()
879 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost() argument
882 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost()
884 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost()
889 static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, in tcp_skb_mark_lost_uncond_verify() argument
892 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost_uncond_verify()
895 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost_uncond_verify()
994 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
998 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1002 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1008 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1011 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1015 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1018 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1022 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1028 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
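Almost every comparison in this listing, including the SACK-block validity checks above, goes through before()/after(), the kernel's wrap-safe 32-bit sequence comparators (a signed test on the difference). A minimal demonstration of why plain '<' breaks near the 2^32 wrap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence compares, as used throughout the listing. */
static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
static bool after(uint32_t s1, uint32_t s2)  { return before(s2, s1); }

int main(void)
{
	/* near the 2^32 wrap, plain '<' gets the ordering wrong: */
	uint32_t a = 0xfffffff0u, b = 0x10u;
	printf("before(a,b)=%d  (a<b)=%d\n", before(a, b), a < b); /* 1  0 */
	return 0;
}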
1043 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_lost_retrans() local
1046 u32 new_low_seq = tp->snd_nxt; in tcp_mark_lost_retrans()
1047 u32 received_upto = tcp_highest_sack_seq(tp); in tcp_mark_lost_retrans()
1049 if (!tcp_is_fack(tp) || !tp->retrans_out || in tcp_mark_lost_retrans()
1050 !after(received_upto, tp->lost_retrans_low) || in tcp_mark_lost_retrans()
1059 if (cnt == tp->retrans_out) in tcp_mark_lost_retrans()
1061 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_mark_lost_retrans()
1080 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_lost_retrans()
1082 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_mark_lost_retrans()
1091 if (tp->retrans_out) in tcp_mark_lost_retrans()
1092 tp->lost_retrans_low = new_low_seq; in tcp_mark_lost_retrans()
1099 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1106 tcp_dsack_seen(tp); in tcp_check_dsack()
1115 tcp_dsack_seen(tp); in tcp_check_dsack()
1122 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1124 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1125 tp->undo_retrans--; in tcp_check_dsack()
1198 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1203 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1204 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1205 tp->undo_retrans--; in tcp_sacktag_one()
1211 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1222 tp->lost_out -= pcount; in tcp_sacktag_one()
1223 tp->retrans_out -= pcount; in tcp_sacktag_one()
1231 tcp_highest_sack_seq(tp))) in tcp_sacktag_one()
1234 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1248 tp->lost_out -= pcount; in tcp_sacktag_one()
1254 tp->sacked_out += pcount; in tcp_sacktag_one()
1259 if (!tcp_is_fack(tp) && tp->lost_skb_hint && in tcp_sacktag_one()
1260 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) in tcp_sacktag_one()
1261 tp->lost_cnt_hint += pcount; in tcp_sacktag_one()
1263 if (fack_count > tp->fackets_out) in tcp_sacktag_one()
1264 tp->fackets_out = fack_count; in tcp_sacktag_one()
1273 tp->retrans_out -= pcount; in tcp_sacktag_one()
1287 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1304 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1305 tp->lost_cnt_hint += pcount; in tcp_shifted_skb()
1341 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1342 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1343 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1344 tp->lost_skb_hint = prev; in tcp_shifted_skb()
1345 tp->lost_cnt_hint -= tcp_skb_pcount(prev); in tcp_shifted_skb()
1385 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1402 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1479 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1524 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1583 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
1630 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
1632 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
1639 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
1654 state.reord = tp->packets_out; in tcp_sacktag_write_queue()
1657 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1658 if (WARN_ON(tp->fackets_out)) in tcp_sacktag_write_queue()
1659 tp->fackets_out = 0; in tcp_sacktag_write_queue()
1672 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
1675 if (!tp->packets_out) in tcp_sacktag_write_queue()
1686 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
1692 if (!tp->undo_marker) in tcp_sacktag_write_queue()
1698 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
1699 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
1734 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1736 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
1738 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
1740 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
1755 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
1760 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
1783 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
1788 state.fack_count = tp->fackets_out; in tcp_sacktag_write_queue()
1799 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
1803 state.fack_count = tp->fackets_out; in tcp_sacktag_write_queue()
1816 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
1817 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
1818 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
1821 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
1823 if ((state.reord < tp->fackets_out) && in tcp_sacktag_write_queue()
1824 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) in tcp_sacktag_write_queue()
1825 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); in tcp_sacktag_write_queue()
1828 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
1832 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
1833 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
1834 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
1835 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
1844 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
1848 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
1849 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
1851 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
1852 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
1864 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
1865 if (tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
1866 tcp_update_reordering(sk, tp->packets_out + addend, 0); in tcp_check_reno_reordering()
1873 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
1874 tp->sacked_out++; in tcp_add_reno_sack()
1876 tcp_verify_left_out(tp); in tcp_add_reno_sack()
1883 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
1887 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
1888 tp->sacked_out = 0; in tcp_remove_reno_sacks()
1890 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
1893 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
1896 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
1898 tp->sacked_out = 0; in tcp_reset_reno_sack()
1901 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
1903 tp->retrans_out = 0; in tcp_clear_retrans()
1904 tp->lost_out = 0; in tcp_clear_retrans()
1905 tp->undo_marker = 0; in tcp_clear_retrans()
1906 tp->undo_retrans = -1; in tcp_clear_retrans()
1907 tp->fackets_out = 0; in tcp_clear_retrans()
1908 tp->sacked_out = 0; in tcp_clear_retrans()
1911 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
1913 tp->undo_marker = tp->snd_una; in tcp_init_undo()
1915 tp->undo_retrans = tp->retrans_out ? : -1; in tcp_init_undo()
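tcp_init_undo() snapshots the state needed to undo a recovery later: undo_marker pins snd_una at entry, and undo_retrans counts outstanding retransmissions whose D-SACKs could prove the recovery spurious; the '? :' is GCC's omitted-middle conditional, and the -1 keeps D-SACK-based undo (which needs the count to reach exactly 0) from firing when nothing was retransmitted. A portable spelling of the same bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct undo { uint32_t undo_marker; int32_t undo_retrans; };

static void init_undo(struct undo *u, uint32_t snd_una, uint32_t retrans_out)
{
	u->undo_marker = snd_una;   /* where this recovery episode began */
	/* same as the listing's "retrans_out ? : -1", portably: */
	u->undo_retrans = retrans_out ? (int32_t)retrans_out : -1;
}

int main(void)
{
	struct undo u;
	init_undo(&u, 5000, 0);     /* nothing retransmitted yet */
	printf("marker=%u retrans=%d\n", u.undo_marker, u.undo_retrans);
	return 0;
}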
1925 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
1932 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
1935 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1936 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1938 tcp_init_undo(tp); in tcp_enter_loss()
1940 tp->snd_cwnd = 1; in tcp_enter_loss()
1941 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
1942 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_enter_loss()
1944 tp->retrans_out = 0; in tcp_enter_loss()
1945 tp->lost_out = 0; in tcp_enter_loss()
1947 if (tcp_is_reno(tp)) in tcp_enter_loss()
1948 tcp_reset_reno_sack(tp); in tcp_enter_loss()
1954 tp->sacked_out = 0; in tcp_enter_loss()
1955 tp->fackets_out = 0; in tcp_enter_loss()
1957 tcp_clear_all_retrans_hints(tp); in tcp_enter_loss()
1967 tp->lost_out += tcp_skb_pcount(skb); in tcp_enter_loss()
1968 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; in tcp_enter_loss()
1971 tcp_verify_left_out(tp); in tcp_enter_loss()
1977 tp->sacked_out >= sysctl_tcp_reordering) in tcp_enter_loss()
1978 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
1981 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
1982 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
1988 tp->frto = sysctl_tcp_frto && in tcp_enter_loss()
2006 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2007 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2017 static inline int tcp_fackets_out(const struct tcp_sock *tp) in tcp_fackets_out() argument
2019 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; in tcp_fackets_out()
2037 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics() argument
2039 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; in tcp_dupack_heuristics()
2044 struct tcp_sock *tp = tcp_sk(sk); in tcp_pause_early_retransmit() local
2052 (flag & FLAG_ECE) || !tp->srtt_us) in tcp_pause_early_retransmit()
2055 delay = max(usecs_to_jiffies(tp->srtt_us >> 5), in tcp_pause_early_retransmit()
2161 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2165 if (tp->lost_out) in tcp_time_to_recover()
2169 if (tcp_dupack_heuristics(tp) > tp->reordering) in tcp_time_to_recover()
2175 packets_out = tp->packets_out; in tcp_time_to_recover()
2176 if (packets_out <= tp->reordering && in tcp_time_to_recover()
2177 tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && in tcp_time_to_recover()
2190 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && in tcp_time_to_recover()
2191 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && in tcp_time_to_recover()
2192 tcp_is_sack(tp) && !tcp_send_head(sk)) in tcp_time_to_recover()
2200 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && in tcp_time_to_recover()
2201 (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && in tcp_time_to_recover()
2216 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2222 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; in tcp_mark_head_lost()
2224 WARN_ON(packets > tp->packets_out); in tcp_mark_head_lost()
2225 if (tp->lost_skb_hint) { in tcp_mark_head_lost()
2226 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2227 cnt = tp->lost_cnt_hint; in tcp_mark_head_lost()
2241 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2242 tp->lost_cnt_hint = cnt; in tcp_mark_head_lost()
2248 if (tcp_is_fack(tp) || tcp_is_reno(tp) || in tcp_mark_head_lost()
2253 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || in tcp_mark_head_lost()
2266 tcp_skb_mark_lost(tp, skb); in tcp_mark_head_lost()
2271 tcp_verify_left_out(tp); in tcp_mark_head_lost()
2278 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2280 if (tcp_is_reno(tp)) { in tcp_update_scoreboard()
2282 } else if (tcp_is_fack(tp)) { in tcp_update_scoreboard()
2283 int lost = tp->fackets_out - tp->reordering; in tcp_update_scoreboard()
2288 int sacked_upto = tp->sacked_out - tp->reordering; in tcp_update_scoreboard()
2299 static inline void tcp_moderate_cwnd(struct tcp_sock *tp) in tcp_moderate_cwnd() argument
2301 tp->snd_cwnd = min(tp->snd_cwnd, in tcp_moderate_cwnd()
2302 tcp_packets_in_flight(tp) + tcp_max_burst(tp)); in tcp_moderate_cwnd()
2303 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_moderate_cwnd()
2309 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2311 return !tp->retrans_stamp || in tcp_packet_delayed()
2312 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_packet_delayed()
2313 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); in tcp_packet_delayed()
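tcp_packet_delayed() is the timestamp half of undo detection: if an ACK echoes a timestamp older than retrans_stamp (the time of the first retransmission), the ACK must have been triggered by the original transmission, so the loss signal was false. Condensed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ts_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

static bool packet_delayed(uint32_t retrans_stamp, bool saw_tstamp,
			   uint32_t rcv_tsecr)
{
	return !retrans_stamp ||                       /* nothing retransmitted */
	       (saw_tstamp && rcv_tsecr &&
		ts_before(rcv_tsecr, retrans_stamp));  /* echo predates rexmit */
}

int main(void)
{
	/* echoed ts 1000 < first retransmit at ts 1500: spurious recovery */
	printf("%d\n", packet_delayed(1500, true, 1000));  /* prints 1 */
	return 0;
}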
2334 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2337 if (tp->retrans_out) in tcp_any_retrans_done()
2350 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2357 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2358 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2359 tp->packets_out); in DBGUNDO()
2367 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2368 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2369 tp->packets_out); in DBGUNDO()
2379 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2389 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2390 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2393 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2397 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2399 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); in tcp_undo_cwnd_reduction()
2401 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2402 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2403 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2406 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); in tcp_undo_cwnd_reduction()
2408 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_undo_cwnd_reduction()
2409 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2412 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2414 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2420 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2422 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2437 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_try_undo_recovery()
2441 tcp_moderate_cwnd(tp); in tcp_try_undo_recovery()
2443 tp->retrans_stamp = 0; in tcp_try_undo_recovery()
2453 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2455 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2467 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2469 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2478 if (frto_undo || tcp_is_sack(tp)) in tcp_try_undo_loss()
2497 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2499 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2500 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2501 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2502 tp->prior_cwnd = tp->snd_cwnd; in tcp_init_cwnd_reduction()
2503 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2504 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2505 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2506 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2512 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2514 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2516 (tp->packets_out - tp->sacked_out); in tcp_cwnd_reduction()
2518 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2519 if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { in tcp_cwnd_reduction()
2520 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2521 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2522 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2525 max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2530 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; in tcp_cwnd_reduction()
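tcp_init_cwnd_reduction()/tcp_cwnd_reduction() above implement Proportional Rate Reduction (RFC 6937): while packets in flight exceed ssthresh, sending is rationed to the fraction ssthresh/prior_cwnd of what was delivered, and cwnd is pinned to in_flight + sndcnt. A standalone sketch of the sndcnt step; the below-ssthresh fallback from the listing is simplified here:

#include <stdint.h>
#include <stdio.h>

struct prr { uint32_t prior_cwnd, ssthresh, prr_delivered, prr_out; };

static uint32_t prr_sndcnt(struct prr *p, uint32_t newly_acked,
			   uint32_t in_flight)
{
	p->prr_delivered += newly_acked;
	if (in_flight > p->ssthresh) {
		/* ration sends: ssthresh/prior_cwnd of what was delivered */
		uint64_t dividend = (uint64_t)p->ssthresh * p->prr_delivered +
				    p->prior_cwnd - 1;      /* round up */
		return (uint32_t)(dividend / p->prior_cwnd) - p->prr_out;
	}
	/* at or below ssthresh: at least match deliveries (simplified) */
	return newly_acked;
}

int main(void)
{
	struct prr p = { .prior_cwnd = 20, .ssthresh = 10 };
	/* 4 packets newly delivered, 18 still in flight: */
	uint32_t sndcnt = prr_sndcnt(&p, 4, 18);
	printf("sndcnt=%u, cwnd=%u\n", sndcnt, 18 + sndcnt); /* 2, 20 */
	return 0;
}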
2535 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2539 (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { in tcp_end_cwnd_reduction()
2540 tp->snd_cwnd = tp->snd_ssthresh; in tcp_end_cwnd_reduction()
2541 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_end_cwnd_reduction()
2549 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2551 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2553 tp->undo_marker = 0; in tcp_enter_cwr()
2561 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2564 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2569 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2575 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2577 tcp_verify_left_out(tp); in tcp_try_to_open()
2580 tp->retrans_stamp = 0; in tcp_try_to_open()
2602 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2606 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2607 tp->snd_cwnd = tp->snd_cwnd * in tcp_mtup_probe_success()
2608 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2610 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2611 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_mtup_probe_success()
2612 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2626 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2629 u32 prior_lost = tp->lost_out; in tcp_simple_retransmit()
2638 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_simple_retransmit()
2640 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_simple_retransmit()
2644 tcp_clear_retrans_hints_partial(tp); in tcp_simple_retransmit()
2646 if (prior_lost == tp->lost_out) in tcp_simple_retransmit()
2649 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2650 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2652 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2660 tp->high_seq = tp->snd_nxt; in tcp_simple_retransmit()
2661 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2662 tp->prior_ssthresh = 0; in tcp_simple_retransmit()
2663 tp->undo_marker = 0; in tcp_simple_retransmit()
2672 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2675 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2682 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2683 tcp_init_undo(tp); in tcp_enter_recovery()
2687 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2698 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2699 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
2705 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
2713 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
2715 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
2717 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
2720 if (after(tp->snd_nxt, tp->high_seq)) in tcp_process_loss()
2722 tp->frto = 0; in tcp_process_loss()
2731 if (tcp_is_reno(tp)) { in tcp_process_loss()
2735 if (after(tp->snd_nxt, tp->high_seq) && is_dupack) in tcp_process_loss()
2738 tcp_reset_reno_sack(tp); in tcp_process_loss()
2747 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
2749 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
2753 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); in tcp_try_undo_partial()
2760 if (tp->retrans_out) { in tcp_try_undo_partial()
2766 tp->retrans_stamp = 0; in tcp_try_undo_partial()
2793 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
2795 (tcp_fackets_out(tp) > tp->reordering)); in tcp_fastretrans_alert()
2798 if (WARN_ON(!tp->packets_out && tp->sacked_out)) in tcp_fastretrans_alert()
2799 tp->sacked_out = 0; in tcp_fastretrans_alert()
2800 if (WARN_ON(!tp->sacked_out && tp->fackets_out)) in tcp_fastretrans_alert()
2801 tp->fackets_out = 0; in tcp_fastretrans_alert()
2806 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
2813 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
2818 WARN_ON(tp->retrans_out != 0); in tcp_fastretrans_alert()
2819 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
2820 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
2825 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
2832 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
2833 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2845 if (tcp_is_reno(tp) && is_dupack) in tcp_fastretrans_alert()
2851 do_lost = tcp_is_reno(tp) || in tcp_fastretrans_alert()
2852 tcp_fackets_out(tp) > tp->reordering; in tcp_fastretrans_alert()
2865 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
2867 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2883 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
2886 tp->snd_cwnd++; in tcp_fastretrans_alert()
2905 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
2924 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_ack_update_rtt()
2926 seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr); in tcp_ack_update_rtt()
2942 struct tcp_sock *tp = tcp_sk(sk); in tcp_synack_rtt_meas() local
2945 if (synack_stamp && !tp->total_retrans) in tcp_synack_rtt_meas()
2951 if (!tp->srtt_us) in tcp_synack_rtt_meas()
2969 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
2974 if (tp->fastopen_rsk) in tcp_rearm_rto()
2977 if (!tp->packets_out) { in tcp_rearm_rto()
3004 struct tcp_sock *tp = tcp_sk(sk); in tcp_resume_early_retransmit() local
3009 if (!tp->do_early_retrans) in tcp_resume_early_retransmit()
3020 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3023 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3026 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3062 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3063 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3064 u32 reord = tp->packets_out; in tcp_clean_rtx_queue()
3083 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3085 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3101 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3110 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3115 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3117 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3119 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3133 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3141 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3142 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3143 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3144 tp->lost_skb_hint = NULL; in tcp_clean_rtx_queue()
3147 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3148 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3167 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3171 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3178 tcp_update_reordering(sk, tp->fackets_out - reord, 0); in tcp_clean_rtx_queue()
3180 delta = tcp_is_fack(tp) ? pkts_acked : in tcp_clean_rtx_queue()
3181 prior_sacked - tp->sacked_out; in tcp_clean_rtx_queue()
3182 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); in tcp_clean_rtx_queue()
3185 tp->fackets_out -= min(pkts_acked, tp->fackets_out); in tcp_clean_rtx_queue()
3202 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3203 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3204 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3205 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3207 if (tp->lost_out) { in tcp_clean_rtx_queue()
3209 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3210 tp->lost_out = 0; in tcp_clean_rtx_queue()
3212 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3214 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3215 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3217 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3219 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3220 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3229 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3234 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3275 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3279 return after(ack, tp->snd_una) || in tcp_may_update_window()
3280 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3281 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); in tcp_may_update_window()
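tcp_may_update_window() is essentially RFC 793's SND.WL1/SND.WL2 rule: accept the advertised window from a segment that acks new data, carries a newer sequence, or carries the same sequence with a larger window. Condensed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

static bool may_update_window(uint32_t snd_una, uint32_t snd_wl1,
			      uint32_t snd_wnd, uint32_t ack,
			      uint32_t ack_seq, uint32_t nwin)
{
	return seq_after(ack, snd_una) ||              /* acks new data */
	       seq_after(ack_seq, snd_wl1) ||          /* newer segment */
	       (ack_seq == snd_wl1 && nwin > snd_wnd); /* bigger window */
}

int main(void)
{
	/* pure duplicate ACK, same seq, same window: no update */
	printf("%d\n", may_update_window(100, 50, 8192, 100, 50, 8192));
	/* same seq but a larger advertised window: update */
	printf("%d\n", may_update_window(100, 50, 8192, 100, 50, 16384));
	return 0;
}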
3285 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3287 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3289 u64_stats_update_begin(&tp->syncp); in tcp_snd_una_update()
3290 tp->bytes_acked += delta; in tcp_snd_una_update()
3291 u64_stats_update_end(&tp->syncp); in tcp_snd_una_update()
3292 tp->snd_una = ack; in tcp_snd_una_update()
3296 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3298 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3300 u64_stats_update_begin(&tp->syncp); in tcp_rcv_nxt_update()
3301 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3302 u64_stats_update_end(&tp->syncp); in tcp_rcv_nxt_update()
3303 tp->rcv_nxt = seq; in tcp_rcv_nxt_update()
3314 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3319 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3321 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3323 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3325 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3326 tp->snd_wnd = nwin; in tcp_ack_update_window()
3331 tp->pred_flags = 0; in tcp_ack_update_window()
3334 if (nwin > tp->max_window) { in tcp_ack_update_window()
3335 tp->max_window = nwin; in tcp_ack_update_window()
3341 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3382 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3388 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3403 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3405 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3406 tp->rx_opt.ts_recent_stamp = get_seconds(); in tcp_store_ts_recent()
3409 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3411 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3419 if (tcp_paws_check(&tp->rx_opt, 0)) in tcp_replace_ts_recent()
3420 tcp_store_ts_recent(tp); in tcp_replace_ts_recent()
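tcp_replace_ts_recent() maintains the PAWS timestamp: only a segment starting at or before rcv_wup may refresh ts_recent, and only if the new value passes a monotonicity check (tcp_paws_check() also allows replacement after roughly 24 idle days, omitted here). A condensed sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

struct ts_state { uint32_t ts_recent; bool saw_tstamp; uint32_t rcv_tsval; };

static void replace_ts_recent(struct ts_state *t, uint32_t seq,
			      uint32_t rcv_wup)
{
	if (t->saw_tstamp && !seq_after(seq, rcv_wup) &&
	    (int32_t)(t->rcv_tsval - t->ts_recent) >= 0)  /* PAWS-ok */
		t->ts_recent = t->rcv_tsval;
}

int main(void)
{
	struct ts_state t = { .ts_recent = 100, .saw_tstamp = true,
			      .rcv_tsval = 200 };
	replace_ts_recent(&t, 1000, 1000);  /* seq <= rcv_wup: refresh */
	printf("ts_recent=%u\n", t.ts_recent);  /* 200 */
	return 0;
}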
3431 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3433 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3438 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3439 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3452 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3468 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
3469 u32 prior_snd_una = tp->snd_una; in tcp_ack()
3474 int prior_packets = tp->packets_out; in tcp_ack()
3475 const int prior_unsacked = tp->packets_out - tp->sacked_out; in tcp_ack()
3487 if (before(ack, prior_snd_una - tp->max_window)) { in tcp_ack()
3497 if (after(ack, tp->snd_nxt)) in tcp_ack()
3509 prior_fackets = tp->fackets_out; in tcp_ack()
3515 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3522 tcp_update_wl(tp, ack_seq); in tcp_ack()
3523 tcp_snd_una_update(tp, ack); in tcp_ack()
3543 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3559 tp->rcv_tstamp = tcp_time_stamp; in tcp_ack()
3564 acked = tp->packets_out; in tcp_ack()
3567 acked -= tp->packets_out; in tcp_ack()
3578 if (tp->tlp_high_seq) in tcp_ack()
3604 if (tp->tlp_high_seq) in tcp_ack()
3609 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3623 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3759 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
3765 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
3767 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
3770 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
3772 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
3782 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
3788 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
3790 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
3792 if (tcp_parse_aligned_timestamp(tp, th)) in tcp_fast_parse_options()
3796 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
3797 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
3798 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
3866 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
3872 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
3875 ack == tp->snd_una && in tcp_disordered_ack()
3878 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && in tcp_disordered_ack()
3881 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
3887 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
3889 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && in tcp_paws_discard()
3906 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) in tcp_sequence() argument
3908 return !before(end_seq, tp->rcv_wup) && in tcp_sequence()
3909 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); in tcp_sequence()
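tcp_sequence() is the in-window acceptance test from RFC 793: a segment is acceptable when it ends at or after the last window update point (rcv_wup) and starts inside the advertised receive window. Condensed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

static bool tcp_seq_ok(uint32_t seq, uint32_t end_seq, uint32_t rcv_wup,
		       uint32_t rcv_nxt, uint32_t rcv_wnd)
{
	return !seq_before(end_seq, rcv_wup) &&
	       !seq_after(seq, rcv_nxt + rcv_wnd);
}

int main(void)
{
	/* rcv_wup = rcv_nxt = 1000, 4096-byte window */
	printf("%d\n", tcp_seq_ok(1500, 1600, 1000, 1000, 4096)); /* 1 */
	printf("%d\n", tcp_seq_ok(6000, 6100, 1000, 1000, 4096)); /* 0 */
	return 0;
}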
3953 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4006 __skb_queue_purge(&tp->out_of_order_queue); in tcp_fin()
4007 if (tcp_is_sack(tp)) in tcp_fin()
4008 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4038 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4040 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { in tcp_dsack_set()
4043 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4050 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4051 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4052 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4058 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4060 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4063 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4068 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4071 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4075 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { in tcp_send_dupack()
4078 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4079 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4090 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4093 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4099 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4106 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4107 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4117 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4118 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4119 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4131 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4144 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4154 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4159 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4161 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4162 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4166 if (skb_queue_empty(&tp->out_of_order_queue)) { in tcp_sack_remove()
4167 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4173 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4177 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4181 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4188 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
4234 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4235 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
4239 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { in tcp_ofo_queue()
4240 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4250 __skb_unlink(skb, &tp->out_of_order_queue); in tcp_ofo_queue()
4251 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_ofo_queue()
4257 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_ofo_queue()
4262 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4297 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
4301 tcp_ecn_check_ce(tp, skb); in tcp_data_queue_ofo()
4310 tp->pred_flags = 0; in tcp_data_queue_ofo()
4315 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue_ofo()
4317 skb1 = skb_peek_tail(&tp->out_of_order_queue); in tcp_data_queue_ofo()
4320 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
4321 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
4322 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; in tcp_data_queue_ofo()
4323 tp->selective_acks[0].end_seq = in tcp_data_queue_ofo()
4326 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4337 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4344 if (!tp->rx_opt.num_sacks || in tcp_data_queue_ofo()
4345 tp->selective_acks[0].end_seq != seq) in tcp_data_queue_ofo()
4349 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
4357 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { in tcp_data_queue_ofo()
4361 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); in tcp_data_queue_ofo()
4379 if (skb_queue_is_first(&tp->out_of_order_queue, in tcp_data_queue_ofo()
4384 &tp->out_of_order_queue, in tcp_data_queue_ofo()
4389 __skb_queue_head(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4391 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); in tcp_data_queue_ofo()
4394 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { in tcp_data_queue_ofo()
4395 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_data_queue_ofo()
4404 __skb_unlink(skb1, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4412 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4490 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
4500 tcp_ecn_accept_cwr(tp, skb); in tcp_data_queue()
4502 tp->rx_opt.dsack = 0; in tcp_data_queue()
4508 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
4509 if (tcp_receive_window(tp) == 0) in tcp_data_queue()
4513 if (tp->ucopy.task == current && in tcp_data_queue()
4514 tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && in tcp_data_queue()
4515 sock_owned_by_user(sk) && !tp->urg_data) { in tcp_data_queue()
4517 tp->ucopy.len); in tcp_data_queue()
4522 if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { in tcp_data_queue()
4523 tp->ucopy.len -= chunk; in tcp_data_queue()
4524 tp->copied_seq += chunk; in tcp_data_queue()
4539 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_data_queue()
4545 if (!skb_queue_empty(&tp->out_of_order_queue)) { in tcp_data_queue()
4551 if (skb_queue_empty(&tp->out_of_order_queue)) in tcp_data_queue()
4555 if (tp->rx_opt.num_sacks) in tcp_data_queue()
4556 tcp_sack_remove(tp); in tcp_data_queue()
4567 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
4581 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
4586 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
4589 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_data_queue()
4592 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4597 if (!tcp_receive_window(tp)) in tcp_data_queue()
4723 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
4724 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
4738 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) in tcp_collapse_ofo_queue()
4739 next = skb_queue_next(&tp->out_of_order_queue, skb); in tcp_collapse_ofo_queue()
4747 tcp_collapse(sk, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
4770 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
4773 if (!skb_queue_empty(&tp->out_of_order_queue)) { in tcp_prune_ofo_queue()
4775 __skb_queue_purge(&tp->out_of_order_queue); in tcp_prune_ofo_queue()
4782 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
4783 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
4799 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
4801 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
4808 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); in tcp_prune_queue()
4815 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
4836 tp->pred_flags = 0; in tcp_prune_queue()
4842 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
4859 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) in tcp_should_expand_sndbuf()
4873 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
4877 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_new_space()
4906 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
4909 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
4913 __tcp_select_window(sk) >= tp->rcv_wnd) || in __tcp_ack_snd_check()
4917 (ofo_possible && skb_peek(&tp->out_of_order_queue))) { in __tcp_ack_snd_check()
4947 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
4955 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
4968 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
4972 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
4993 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
4994 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
4996 tp->copied_seq++; in tcp_check_urg()
4997 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5003 tp->urg_data = TCP_URG_NOTYET; in tcp_check_urg()
5004 tp->urg_seq = ptr; in tcp_check_urg()
5007 tp->pred_flags = 0; in tcp_check_urg()
5013 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5020 if (tp->urg_data == TCP_URG_NOTYET) { in tcp_urg()
5021 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
5029 tp->urg_data = TCP_URG_VALID | tmp; in tcp_urg()
5038 struct tcp_sock *tp = tcp_sk(sk); in tcp_copy_to_iovec() local
5044 err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); in tcp_copy_to_iovec()
5046 err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); in tcp_copy_to_iovec()
5049 tp->ucopy.len -= chunk; in tcp_copy_to_iovec()
5050 tp->copied_seq += chunk; in tcp_copy_to_iovec()
5086 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5089 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5095 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5103 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5115 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5129 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) in tcp_validate_incoming()
5183 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
5202 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
5213 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
5214 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5215 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5216 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
5226 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
5230 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) in tcp_rcv_established()
5249 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5250 tcp_store_ts_recent(tp); in tcp_rcv_established()
5267 if (tp->ucopy.task == current && in tcp_rcv_established()
5268 tp->copied_seq == tp->rcv_nxt && in tcp_rcv_established()
5269 len - tcp_header_len <= tp->ucopy.len && in tcp_rcv_established()
5281 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5282 tcp_store_ts_recent(tp); in tcp_rcv_established()
5287 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_rcv_established()
5305 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5306 tcp_store_ts_recent(tp); in tcp_rcv_established()
5319 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5377 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
5397 tp->lsndtime = tcp_time_stamp; in tcp_finish_connect()
5402 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5404 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
5405 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
5407 tp->pred_flags = 0; in tcp_finish_connect()
5418 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
5419 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5420 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
5423 if (mss == tp->rx_opt.user_mss) { in tcp_rcv_fastopen_synack()
5433 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
5436 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
5443 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
5448 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
5463 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
5464 if (tp->syn_data_acked) in tcp_rcv_fastopen_synack()
5473 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
5475 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
5477 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
5478 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
5479 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
5490 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
5491 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) in tcp_rcv_synsent_state_process()
5494 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
5495 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
5531 tcp_ecn_rcv_synack(tp, th); in tcp_rcv_synsent_state_process()
5533 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
5539 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5540 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5545 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
5547 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
5548 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
5549 tp->window_clamp = min(tp->window_clamp, 65535U); in tcp_rcv_synsent_state_process()
5552 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
5553 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
5554 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
5556 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
5557 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
5559 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
5562 if (tcp_is_sack(tp) && sysctl_tcp_fack) in tcp_rcv_synsent_state_process()
5563 tcp_enable_fack(tp); in tcp_rcv_synsent_state_process()
5572 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_synsent_state_process()
5578 if ((tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
5620 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
5621 tcp_paws_reject(&tp->rx_opt, 0)) in tcp_rcv_synsent_state_process()
5631 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
5632 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
5633 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
5634 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
5637 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
5640 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5641 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_synsent_state_process()
5642 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5647 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
5648 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
5649 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
5651 tcp_ecn_rcv_syn(tp, th); in tcp_rcv_synsent_state_process()
5680 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
5681 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
5685 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
5686 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
5700 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
5707 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
5760 req = tp->fastopen_rsk; in tcp_rcv_state_process()
5789 tp->total_retrans = req->num_retrans; in tcp_rcv_state_process()
5792 synack_stamp = tp->lsndtime; in tcp_rcv_state_process()
5798 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_state_process()
5812 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
5813 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
5814 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
5817 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
5818 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
5836 tp->lsndtime = tcp_time_stamp; in tcp_rcv_state_process()
5839 tcp_fast_path_on(tp); in tcp_rcv_state_process()
5864 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
5880 if (tp->linger2 < 0 || in tcp_rcv_state_process()
5882 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { in tcp_rcv_state_process()
5907 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
5914 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
5930 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_rcv_state_process()
5940 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6093 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
6132 tmp_opt.user_mss = tp->rx_opt.user_mss; in tcp_conn_request()