Lines matching refs:tp in net/ipv4/tcp_output.c (the local struct tcp_sock *tp)
75 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
76 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
101 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) in tcp_acceptable_seq()
102 return tp->snd_nxt; in tcp_acceptable_seq()
104 return tcp_wnd_end(tp); in tcp_acceptable_seq()
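
tcp_acceptable_seq() returns snd_nxt while it still lies within the send window, otherwise the window end. The before()/after() helpers it leans on compare 32-bit sequence numbers safely across wraparound; a minimal userspace sketch of their actual definitions from include/net/tcp.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe sequence comparison, as in include/net/tcp.h: the
     * signed difference is negative exactly when seq1 precedes seq2 in
     * modulo-2^32 sequence space. */
    static bool before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }
    #define after(seq2, seq1)       before(seq1, seq2)

    int main(void)
    {
            /* 0xFFFFFFF0 precedes 0x10 once the sequence space wraps. */
            printf("%d\n", before(0xFFFFFFF0u, 0x10u));     /* 1 */
            printf("%d\n", after(0x10u, 0xFFFFFFF0u));      /* 1 */
            return 0;
    }
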
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss() local
125 int mss = tp->advmss; in tcp_advertise_mss()
132 tp->advmss = mss; in tcp_advertise_mss()
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart() local
145 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
146 u32 cwnd = tp->snd_cwnd; in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
155 tp->snd_cwnd = max(cwnd, restart_cwnd); in tcp_cwnd_restart()
156 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_restart()
157 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
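
The decay loop between these references (not itself matched by this search) halves cwnd once per RTO of idle time before the max() at line 155 applies the floor. A standalone model of that policy, with cwnd_after_idle() a hypothetical name:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the decay in tcp_cwnd_restart(): halve cwnd once per RTO
     * of idle time, but never drop below restart_cwnd. */
    static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                                    int32_t idle, int32_t rto)
    {
            while ((idle -= rto) > 0 && cwnd > restart_cwnd)
                    cwnd >>= 1;
            return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
            /* 40-segment window, restart window 10, idle for 3 RTOs. */
            printf("%u\n", cwnd_after_idle(40, 10, 3 * 200, 200)); /* 10 */
            return 0;
    }
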
161 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent() argument
167 if (tcp_packets_in_flight(tp) == 0) in tcp_event_data_sent()
170 tp->lsndtime = now; in tcp_event_data_sent()
268 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window() local
269 u32 old_win = tp->rcv_wnd; in tcp_select_window()
270 u32 cur_win = tcp_receive_window(tp); in tcp_select_window()
285 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
287 tp->rcv_wnd = new_win; in tcp_select_window()
288 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
293 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) in tcp_select_window()
296 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
299 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
303 tp->pred_flags = 0; in tcp_select_window()
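
Lines 285-299 align the advertised window to the scale granularity, clamp it to the largest value the 16-bit header field can carry after scaling, and shift it down for the wire. A userspace sketch of that arithmetic (wire_window() is a made-up name; the kernel's ALIGN() rounds up the same way):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    /* Sketch of tcp_select_window(): align to the scale granularity,
     * clamp to the maximum representable window, shift for the wire. */
    static uint16_t wire_window(uint32_t new_win, int wscale)
    {
            new_win = ALIGN_UP(new_win, 1u << wscale);
            if (new_win > (65535u << wscale))
                    new_win = 65535u << wscale;
            return (uint16_t)(new_win >> wscale);
    }

    int main(void)
    {
            /* 7813 on the wire; the receiver scales back to 1000064. */
            printf("%u\n", (unsigned)wire_window(1000000, 7));
            return 0;
    }
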
317 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack() local
320 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
329 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn() local
340 tp->ecn_flags = 0; in tcp_ecn_send_syn()
344 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
372 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send() local
374 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
377 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
379 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
380 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
388 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
412 static inline bool tcp_urg_mode(const struct tcp_sock *tp) in tcp_urg_mode() argument
414 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
447 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, in tcp_options_write() argument
498 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
499 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
514 tp->rx_opt.dsack = 0; in tcp_options_write()
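
tcp_options_write() emits options as big-endian 32-bit words; a timestamp option (10 bytes) is preceded by two NOPs so its three words stay aligned. A self-contained sketch of that packing (write_tsopt() is a hypothetical wrapper around the same word layout the kernel uses):

    #include <arpa/inet.h>  /* htonl(), ntohl() */
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_NOP              1
    #define TCPOPT_TIMESTAMP        8
    #define TCPOLEN_TIMESTAMP       10

    /* Pack a timestamp option: NOP NOP kind len | tsval | tsecr. */
    static void write_tsopt(uint32_t *ptr, uint32_t tsval, uint32_t tsecr)
    {
            *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                           (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
            *ptr++ = htonl(tsval);
            *ptr++ = htonl(tsecr);
    }

    int main(void)
    {
            uint32_t opt[3];

            write_tsopt(opt, 123456, 654321);
            printf("%08x\n", ntohl(opt[0]));        /* 0101080a */
            return 0;
    }
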
549 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options() local
551 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
554 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
577 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
578 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
582 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
602 tp->syn_fastopen = 1; in tcp_syn_options()
603 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
677 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options() local
684 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
693 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
695 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
696 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
700 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
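
With a timestamp option in place, line 700's eff_sacks is budgeted against the 40-byte TCP option limit. A sketch of the arithmetic using the kernel's actual constants:

    #include <stdio.h>

    #define MAX_TCP_OPTION_SPACE      40
    #define TCPOLEN_TSTAMP_ALIGNED    12
    #define TCPOLEN_SACK_BASE_ALIGNED  4
    #define TCPOLEN_SACK_PERBLOCK      8

    /* After the timestamp option, (40 - 12 - 4) / 8 = 3 SACK blocks fit. */
    int main(void)
    {
            unsigned int remaining = MAX_TCP_OPTION_SPACE -
                                     TCPOLEN_TSTAMP_ALIGNED;
            unsigned int blocks = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
                                  TCPOLEN_SACK_PERBLOCK;

            printf("%u SACK blocks fit\n", blocks); /* 3 */
            return 0;
    }
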
755 struct tcp_sock *tp; in tcp_tasklet_func() local
763 tp = list_entry(q, struct tcp_sock, tsq_node); in tcp_tasklet_func()
764 list_del(&tp->tsq_node); in tcp_tasklet_func()
766 sk = (struct sock *)tp; in tcp_tasklet_func()
773 set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); in tcp_tasklet_func()
777 clear_bit(TSQ_QUEUED, &tp->tsq_flags); in tcp_tasklet_func()
795 struct tcp_sock *tp = tcp_sk(sk); in tcp_release_cb() local
800 flags = tp->tsq_flags; in tcp_release_cb()
804 } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); in tcp_release_cb()
857 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree() local
875 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && in tcp_wfree()
876 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { in tcp_wfree()
883 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
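
tcp_wfree() hands throttled sockets to the TSQ tasklet with atomic bit operations: clear TSQ_THROTTLED, and only the caller that first sets TSQ_QUEUED may enqueue. A userspace analogue with C11 atomics (bit masks here stand in for the kernel's bit indices; claim_queue() is a made-up name):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TSQ_THROTTLED   (1u << 0)
    #define TSQ_QUEUED      (1u << 1)

    /* Claim the right to enqueue: the socket must have been throttled
     * and must not already be queued; the atomics make the handoff safe
     * against a concurrent claimer. */
    static bool claim_queue(atomic_uint *flags)
    {
            unsigned int old = atomic_fetch_and(flags, ~TSQ_THROTTLED);

            if (!(old & TSQ_THROTTLED))
                    return false;           /* was not throttled */
            old = atomic_fetch_or(flags, TSQ_QUEUED);
            return !(old & TSQ_QUEUED);     /* first to queue wins */
    }

    int main(void)
    {
            atomic_uint flags = TSQ_THROTTLED;

            printf("%d\n", claim_queue(&flags));    /* 1: queued */
            printf("%d\n", claim_queue(&flags));    /* 0: already handled */
            return 0;
    }
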
908 struct tcp_sock *tp; in tcp_transmit_skb() local
930 tp = tcp_sk(sk); in tcp_transmit_skb()
964 th->ack_seq = htonl(tp->rcv_nxt); in tcp_transmit_skb()
972 th->window = htons(min(tp->rcv_wnd, 65535U)); in tcp_transmit_skb()
980 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in tcp_transmit_skb()
981 if (before(tp->snd_up, tcb->seq + 0x10000)) { in tcp_transmit_skb()
982 th->urg_ptr = htons(tp->snd_up - tcb->seq); in tcp_transmit_skb()
984 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in tcp_transmit_skb()
990 tcp_options_write((__be32 *)(th + 1), tp, &opts); in tcp_transmit_skb()
999 tp->af_specific->calc_md5_hash(opts.hash_location, in tcp_transmit_skb()
1010 tcp_event_data_sent(tp, sk); in tcp_transmit_skb()
1012 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in tcp_transmit_skb()
1016 tp->segs_out += tcp_skb_pcount(skb); in tcp_transmit_skb()
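
Lines 980-984 encode urgent mode: urg_ptr is a 16-bit offset from this segment's seq, so it saturates at 0xFFFF when snd_up lies more than 64 KB ahead. A sketch (urg_ptr() here is a hypothetical helper; the real code also gates on tcp_urg_mode() and snd_nxt):

    #include <stdint.h>
    #include <stdio.h>

    /* The urgent pointer is a 16-bit offset from seq; clamp when the
     * urgent data is further than 64 KB away. Modular subtraction keeps
     * this wraparound-safe. */
    static uint16_t urg_ptr(uint32_t seq, uint32_t snd_up)
    {
            uint32_t off = snd_up - seq;

            return off < 0x10000 ? (uint16_t)off : 0xFFFF;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)urg_ptr(1000, 1500));   /* 500 */
            printf("%u\n", (unsigned)urg_ptr(1000, 200000)); /* 65535 */
            return 0;
    }
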
1045 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb() local
1048 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
1076 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_fackets_out() local
1078 if (!tp->sacked_out || tcp_is_reno(tp)) in tcp_adjust_fackets_out()
1081 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) in tcp_adjust_fackets_out()
1082 tp->fackets_out -= decr; in tcp_adjust_fackets_out()
1090 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount() local
1092 tp->packets_out -= decr; in tcp_adjust_pcount()
1095 tp->sacked_out -= decr; in tcp_adjust_pcount()
1097 tp->retrans_out -= decr; in tcp_adjust_pcount()
1099 tp->lost_out -= decr; in tcp_adjust_pcount()
1102 if (tcp_is_reno(tp) && decr > 0) in tcp_adjust_pcount()
1103 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1107 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1108 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1109 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) in tcp_adjust_pcount()
1110 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1112 tcp_verify_left_out(tp); in tcp_adjust_pcount()
1138 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment() local
1204 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1287 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss() local
1305 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1306 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1328 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu() local
1333 tp->tcp_header_len + in tcp_mss_to_mtu()
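
__tcp_mtu_to_mss() and tcp_mss_to_mtu() convert between path MTU and MSS by removing or adding header overhead; the real helpers also account for tp->tcp_header_len beyond the base header, icsk_ext_hdr_len, and the mss_clamp applied at lines 1305-1306. A deliberately simplified IPv4 sketch:

    #include <stdio.h>

    #define IP_HDR_LEN      20      /* IPv4, no options */
    #define TCP_HDR_LEN     20      /* base TCP header */

    /* Simplified MTU <-> MSS conversion: the two differ by the network
     * and transport header overhead. */
    static int mtu_to_mss(int mtu) { return mtu - IP_HDR_LEN - TCP_HDR_LEN; }
    static int mss_to_mtu(int mss) { return mss + IP_HDR_LEN + TCP_HDR_LEN; }

    int main(void)
    {
            printf("%d\n", mtu_to_mss(1500));       /* 1460 on Ethernet */
            printf("%d\n", mss_to_mtu(1460));       /* 1500 */
            return 0;
    }
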
1350 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init() local
1355 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1388 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss() local
1396 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1402 tp->mss_cache = mss_now; in tcp_sync_mss()
1413 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss() local
1420 mss_now = tp->mss_cache; in tcp_current_mss()
1434 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1435 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1448 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited() local
1453 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1454 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1455 if (win_used < tp->snd_cwnd) { in tcp_cwnd_application_limited()
1456 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1457 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; in tcp_cwnd_application_limited()
1459 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1461 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_application_limited()
1466 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate() local
1471 if (!before(tp->snd_una, tp->max_packets_seq) || in tcp_cwnd_validate()
1472 tp->packets_out > tp->max_packets_out) { in tcp_cwnd_validate()
1473 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1474 tp->max_packets_seq = tp->snd_nxt; in tcp_cwnd_validate()
1475 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1480 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1481 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_validate()
1484 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1485 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1488 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) in tcp_cwnd_validate()
1494 static bool tcp_minshall_check(const struct tcp_sock *tp) in tcp_minshall_check() argument
1496 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1497 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1508 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1512 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1522 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument
1527 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
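
tcp_minshall_check() (lines 1496-1497) implements the Minshall variant of Nagle: defer a sub-MSS segment only while an earlier small segment, whose end is recorded in snd_sml, is still unacknowledged. The same test in standalone form (small_seg_in_flight() is a made-up name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    /* A small segment is outstanding if its end (snd_sml) is past
     * snd_una (not yet acked) but not past snd_nxt (actually sent). */
    static bool small_seg_in_flight(uint32_t snd_sml, uint32_t snd_una,
                                    uint32_t snd_nxt)
    {
            return after(snd_sml, snd_una) && !after(snd_sml, snd_nxt);
    }

    int main(void)
    {
            printf("%d\n", small_seg_in_flight(150, 100, 200)); /* 1: defer */
            printf("%d\n", small_seg_in_flight(150, 160, 200)); /* 0: acked */
            return 0;
    }
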
1557 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point() local
1560 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1576 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point()
1585 static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in tcp_cwnd_test() argument
1595 in_flight = tcp_packets_in_flight(tp); in tcp_cwnd_test()
1596 cwnd = tp->snd_cwnd; in tcp_cwnd_test()
1626 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1639 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1642 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
1649 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, in tcp_snd_wnd_test() argument
1658 return !after(end_seq, tcp_wnd_end(tp)); in tcp_snd_wnd_test()
1668 const struct tcp_sock *tp = tcp_sk(sk); in tcp_snd_test() local
1673 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) in tcp_snd_test()
1676 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_snd_test()
1677 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) in tcp_snd_test()
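
tcp_snd_test() composes its three gates in order: Nagle, congestion-window quota, then receive window. A simplified sketch of that composition (can_send() is hypothetical, and plain comparisons stand in for the wraparound-safe after()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* All three gates must pass before a segment may leave. */
    static bool can_send(bool nagle_ok, uint32_t packets_in_flight,
                         uint32_t cwnd, uint32_t end_seq, uint32_t wnd_end)
    {
            if (!nagle_ok)
                    return false;
            if (packets_in_flight >= cwnd)  /* no cwnd quota */
                    return false;
            return end_seq <= wnd_end;      /* fits the offered window */
    }

    int main(void)
    {
            printf("%d\n", can_send(true, 9, 10, 5000, 6000));  /* 1 */
            printf("%d\n", can_send(true, 10, 10, 5000, 6000)); /* 0 */
            return 0;
    }
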
1686 const struct tcp_sock *tp = tcp_sk(sk); in tcp_may_send_now() local
1692 tp->nonagle : TCP_NAGLE_PUSH)); in tcp_may_send_now()
1760 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer() local
1774 if ((s32)(tcp_time_stamp - tp->lsndtime) > 0) in tcp_tso_should_defer()
1777 in_flight = tcp_packets_in_flight(tp); in tcp_tso_should_defer()
1779 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); in tcp_tso_should_defer()
1781 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1784 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
1789 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
1798 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); in tcp_tso_should_defer()
1812 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
1820 if (age < (tp->srtt_us >> 4)) in tcp_tso_should_defer()
1837 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe() local
1849 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
1870 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe() local
1888 tp->snd_cwnd < 11 || in tcp_mtu_probe()
1889 tp->rx_opt.num_sacks || tp->rx_opt.dsack) in tcp_mtu_probe()
1899 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
1915 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
1918 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
1920 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
1924 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { in tcp_mtu_probe()
1925 if (!tcp_packets_in_flight(tp)) in tcp_mtu_probe()
1993 tp->snd_cwnd--; in tcp_mtu_probe()
1997 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
1998 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
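
tcp_mtu_probe() binary-searches the path MTU: each probe tries the midpoint of [search_low, search_high], and lines 1997-1998 record the probe's sequence range so the ACK path can move one of the bounds. A sketch of the search step:

    #include <stdio.h>

    /* One step of MTU probing: try the midpoint of the current range.
     * A fully acked probe raises search_low; a lost one lowers
     * search_high. */
    static int next_probe_mtu(int search_low, int search_high)
    {
            return (search_low + search_high) >> 1;
    }

    int main(void)
    {
            printf("probe MTU %d\n", next_probe_mtu(1024, 1500)); /* 1262 */
            return 0;
    }
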
2023 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit() local
2050 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2056 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2065 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
2069 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2081 if (tso_segs > 1 && !tcp_urg_mode(tp)) in tcp_write_xmit()
2106 set_bit(TSQ_THROTTLED, &tp->tsq_flags); in tcp_write_xmit()
2125 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2134 tp->prr_out += sent_pkts; in tcp_write_xmit()
2139 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); in tcp_write_xmit()
2143 return !tp->packets_out && tcp_send_head(sk); in tcp_write_xmit()
2149 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe() local
2151 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); in tcp_schedule_loss_probe()
2163 if (tp->fastopen_rsk) in tcp_schedule_loss_probe()
2173 if (sysctl_tcp_early_retrans < 3 || !tp->packets_out || in tcp_schedule_loss_probe()
2174 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) in tcp_schedule_loss_probe()
2177 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && in tcp_schedule_loss_probe()
2186 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
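
The probe timeout chosen by tcp_schedule_loss_probe() is roughly 2*RTT, stretched to 1.5*RTT plus the worst-case delayed ACK when only one packet is in flight (line 2186), with a small floor. A millisecond-based sketch of that policy (tlp_timeout_ms() is a made-up name; the kernel works in jiffies):

    #include <stdio.h>

    #define DELACK_MAX_MS   200     /* worst-case delayed ACK */

    /* Tail Loss Probe timeout: 2*RTT normally; with a single packet in
     * flight, allow 1.5*RTT plus the delayed-ACK budget; never below
     * 10 ms. */
    static unsigned int tlp_timeout_ms(unsigned int rtt_ms, int packets_out)
    {
            unsigned int timeout = rtt_ms << 1;
            unsigned int solo = rtt_ms + (rtt_ms >> 1) + DELACK_MAX_MS;

            if (packets_out == 1 && timeout < solo)
                    timeout = solo;
            return timeout > 10 ? timeout : 10;
    }

    int main(void)
    {
            printf("%u ms\n", tlp_timeout_ms(50, 1));       /* 275 */
            printf("%u ms\n", tlp_timeout_ms(50, 4));       /* 100 */
            return 0;
    }
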
2226 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe() local
2233 if (tcp_snd_wnd_test(tp, skb, mss)) { in tcp_send_loss_probe()
2234 pcount = tp->packets_out; in tcp_send_loss_probe()
2236 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2246 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2274 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2370 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window() local
2380 int full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
2390 tp->rcv_ssthresh = min(tp->rcv_ssthresh, in __tcp_select_window()
2391 4U * tp->advmss); in __tcp_select_window()
2396 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
2409 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
2410 free_space = tp->rcv_ssthresh; in __tcp_select_window()
2415 window = tp->rcv_wnd; in __tcp_select_window()
2416 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
2423 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) in __tcp_select_window()
2424 window = (((window >> tp->rx_opt.rcv_wscale) + 1) in __tcp_select_window()
2425 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
2448 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans() local
2482 tcp_clear_retrans_hints_partial(tp); in tcp_collapse_retrans()
2483 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
2484 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
2516 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse() local
2544 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2557 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb() local
2577 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2578 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in __tcp_retransmit_skb()
2580 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2594 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2595 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2644 tp->total_retrans++; in __tcp_retransmit_skb()
2651 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb() local
2661 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2664 if (!tp->retrans_stamp) in tcp_retransmit_skb()
2665 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
2671 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
2672 tp->undo_retrans = 0; in tcp_retransmit_skb()
2673 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2683 const struct tcp_sock *tp = tcp_sk(sk); in tcp_can_forward_retransmit() local
2690 if (tcp_is_reno(tp)) in tcp_can_forward_retransmit()
2718 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue() local
2725 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
2728 if (!tp->lost_out) in tcp_xmit_retransmit_queue()
2729 tp->retransmit_high = tp->snd_una; in tcp_xmit_retransmit_queue()
2731 if (tp->retransmit_skb_hint) { in tcp_xmit_retransmit_queue()
2732 skb = tp->retransmit_skb_hint; in tcp_xmit_retransmit_queue()
2734 if (after(last_lost, tp->retransmit_high)) in tcp_xmit_retransmit_queue()
2735 last_lost = tp->retransmit_high; in tcp_xmit_retransmit_queue()
2738 last_lost = tp->snd_una; in tcp_xmit_retransmit_queue()
2748 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
2757 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) in tcp_xmit_retransmit_queue()
2762 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) in tcp_xmit_retransmit_queue()
2766 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { in tcp_xmit_retransmit_queue()
2767 tp->retransmit_high = last_lost; in tcp_xmit_retransmit_queue()
2800 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
2833 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin() local
2844 tp->write_seq++; in tcp_send_fin()
2852 tp->snd_nxt++; in tcp_send_fin()
2865 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
2950 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack() local
2979 user_mss = READ_ONCE(tp->rx_opt.user_mss); in tcp_make_synack()
3062 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init() local
3068 tp->tcp_header_len = sizeof(struct tcphdr) + in tcp_connect_init()
3072 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3073 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; in tcp_connect_init()
3077 if (tp->rx_opt.user_mss) in tcp_connect_init()
3078 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3079 tp->max_window = 0; in tcp_connect_init()
3085 if (!tp->window_clamp) in tcp_connect_init()
3086 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); in tcp_connect_init()
3087 tp->advmss = dst_metric_advmss(dst); in tcp_connect_init()
3088 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) in tcp_connect_init()
3089 tp->advmss = tp->rx_opt.user_mss; in tcp_connect_init()
3095 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3096 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3099 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3100 &tp->rcv_wnd, in tcp_connect_init()
3101 &tp->window_clamp, in tcp_connect_init()
3106 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3107 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3111 tp->snd_wnd = 0; in tcp_connect_init()
3112 tcp_init_wl(tp, 0); in tcp_connect_init()
3113 tp->snd_una = tp->write_seq; in tcp_connect_init()
3114 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3115 tp->snd_up = tp->write_seq; in tcp_connect_init()
3116 tp->snd_nxt = tp->write_seq; in tcp_connect_init()
3118 if (likely(!tp->repair)) in tcp_connect_init()
3119 tp->rcv_nxt = 0; in tcp_connect_init()
3121 tp->rcv_tstamp = tcp_time_stamp; in tcp_connect_init()
3122 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3123 tp->copied_seq = tp->rcv_nxt; in tcp_connect_init()
3127 tcp_clear_retrans(tp); in tcp_connect_init()
3132 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb() local
3140 tp->write_seq = tcb->end_seq; in tcp_connect_queue_skb()
3141 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3153 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data() local
3154 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3159 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3160 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, in tcp_send_syn_data()
3178 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) in tcp_send_syn_data()
3179 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_send_syn_data()
3224 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
3235 tp->syn_fastopen = 0; in tcp_send_syn_data()
3244 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect() local
3250 if (unlikely(tp->repair)) { in tcp_connect()
3259 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
3260 tp->retrans_stamp = tcp_time_stamp; in tcp_connect()
3265 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3273 tp->snd_nxt = tp->write_seq; in tcp_connect()
3274 tp->pushed_seq = tp->write_seq; in tcp_connect()
3297 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack() local
3310 if (tp->srtt_us) { in tcp_send_delayed_ack()
3311 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
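
The >> 3 at line 3311 (and the >> 4 at line 1820) follows from tp->srtt_us storing eight times the smoothed RTT in microseconds. A two-line illustration:

    #include <stdio.h>

    /* tp->srtt_us is a 3-bit fixed point: 8 * smoothed RTT in usecs. */
    int main(void)
    {
            unsigned int srtt_us = 40000 << 3;      /* smoothed RTT: 40 ms */

            printf("rtt   = %u us\n", srtt_us >> 3); /* 40000 */
            printf("rtt/2 = %u us\n", srtt_us >> 4); /* 20000 */
            return 0;
    }
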
3398 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb() local
3412 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
3429 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup() local
3436 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3439 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3441 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3442 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3463 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
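
The between() test in tcp_write_wakeup() checks that snd_up falls in (snd_una, snd_una + 0xFFFF] using wraparound-safe modular arithmetic. Its actual definition from include/net/tcp.h, exercised standalone:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* between(seq1, seq2, seq3) means seq2 <= seq1 <= seq3 in
     * modulo-2^32 sequence space, as in include/net/tcp.h. */
    static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
            return seq3 - seq2 >= seq1 - seq2;
    }

    int main(void)
    {
            /* snd_una = 1000: urgent data within the next 64 KB? */
            printf("%d\n", between(1500, 1001, 1000 + 0xFFFF)); /* 1 */
            printf("%d\n", between(900,  1001, 1000 + 0xFFFF)); /* 0 */
            return 0;
    }
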
3475 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0() local
3481 if (tp->packets_out || !tcp_send_head(sk)) { in tcp_send_probe0()