Lines matching refs:tp in net/ipv4/tcp_output.c (cross-reference listing: source line number, the matching line, and its enclosing function)
75 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
76 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
101 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) in tcp_acceptable_seq()
102 return tp->snd_nxt; in tcp_acceptable_seq()
104 return tcp_wnd_end(tp); in tcp_acceptable_seq()
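
The tcp_acceptable_seq() entries above clamp snd_nxt to the right edge of the send window using the kernel's wrap-safe before() comparison. A minimal user-space sketch of that arithmetic (seq_before() and acceptable_seq() are illustrative names, not kernel functions):

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe sequence comparison, modeled on the kernel's before()
     * helper: the signed 32-bit difference interprets distances mod 2^32. */
    static int seq_before(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s1 - s2) < 0;
    }

    /* Mirrors tcp_acceptable_seq(): use snd_nxt unless it has run past
     * the right edge of the send window, else clamp to the window end. */
    static uint32_t acceptable_seq(uint32_t snd_nxt, uint32_t wnd_end)
    {
        return !seq_before(wnd_end, snd_nxt) ? snd_nxt : wnd_end;
    }

    int main(void)
    {
        /* Wrap-around case: 0x10 is "after" 0xFFFFFFF0 mod 2^32. */
        printf("%d\n", seq_before(0xFFFFFFF0u, 0x10u)); /* prints 1 */
        printf("%u\n", acceptable_seq(100, 90));        /* prints 90 */
        return 0;
    }
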
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss() local
125 int mss = tp->advmss; in tcp_advertise_mss()
132 tp->advmss = mss; in tcp_advertise_mss()
143 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart() local
144 s32 delta = tcp_time_stamp - tp->lsndtime; in tcp_cwnd_restart()
145 u32 restart_cwnd = tcp_init_cwnd(tp, dst); in tcp_cwnd_restart()
146 u32 cwnd = tp->snd_cwnd; in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
155 tp->snd_cwnd = max(cwnd, restart_cwnd); in tcp_cwnd_restart()
156 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_restart()
157 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
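
tcp_cwnd_restart() implements the RFC 2861 decay of the congestion window after an idle period: one halving per elapsed RTO, floored at the restart window. A sketch under those assumptions (cwnd_after_idle() is an illustrative stand-in for the decision at lines 144-155):

    #include <stdint.h>

    /* For every RTO of idle time, halve cwnd, but never drop below the
     * restart (initial) window; the final max() covers the halving that
     * may overshoot the floor, as in tcp_cwnd_restart(). */
    static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                                    int32_t idle, int32_t rto)
    {
        if (restart_cwnd > cwnd)
            restart_cwnd = cwnd;
        while ((idle -= rto) > 0 && cwnd > restart_cwnd)
            cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }
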
161 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent() argument
169 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) in tcp_event_data_sent()
172 tp->lsndtime = now; in tcp_event_data_sent()
271 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window() local
272 u32 old_win = tp->rcv_wnd; in tcp_select_window()
273 u32 cur_win = tcp_receive_window(tp); in tcp_select_window()
288 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
290 tp->rcv_wnd = new_win; in tcp_select_window()
291 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
296 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) in tcp_select_window()
299 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
302 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
306 tp->pred_flags = 0; in tcp_select_window()
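
tcp_select_window() must squeeze the receive window into the 16-bit header field, which is why the lines at 296-302 cap it and then right-shift by the negotiated scale. A hedged sketch of just that step (wire_window() is an illustrative name):

    #include <stdint.h>

    /* The on-wire window field is 16 bits, so the real receive window is
     * shifted right by the negotiated scale; without scaling it is capped
     * at 65535 (the signed-window workaround at line 296 would use 32767). */
    static uint16_t wire_window(uint32_t new_win, int rcv_wscale)
    {
        uint32_t cap = rcv_wscale ? (65535u << rcv_wscale) : 65535u;

        if (new_win > cap)
            new_win = cap;
        return (uint16_t)(new_win >> rcv_wscale);
    }
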
320 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack() local
323 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
332 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn() local
343 tp->ecn_flags = 0; in tcp_ecn_send_syn()
347 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
370 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send() local
372 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
375 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
377 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
378 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
386 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
414 static inline bool tcp_urg_mode(const struct tcp_sock *tp) in tcp_urg_mode() argument
416 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
449 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, in tcp_options_write() argument
500 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
501 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
516 tp->rx_opt.dsack = 0; in tcp_options_write()
551 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options() local
553 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
556 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
579 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
580 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
584 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
604 tp->syn_fastopen = 1; in tcp_syn_options()
605 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
680 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options() local
687 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
696 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
698 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
699 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
703 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
758 struct tcp_sock *tp; in tcp_tasklet_func() local
766 tp = list_entry(q, struct tcp_sock, tsq_node); in tcp_tasklet_func()
767 list_del(&tp->tsq_node); in tcp_tasklet_func()
769 sk = (struct sock *)tp; in tcp_tasklet_func()
776 set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); in tcp_tasklet_func()
780 clear_bit(TSQ_QUEUED, &tp->tsq_flags); in tcp_tasklet_func()
798 struct tcp_sock *tp = tcp_sk(sk); in tcp_release_cb() local
803 flags = tp->tsq_flags; in tcp_release_cb()
807 } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); in tcp_release_cb()
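
The tcp_release_cb() lines at 803-807 show a lock-free claim of deferred-work bits in tsq_flags: snapshot, mask, compare-and-swap, retry. A user-space sketch of the same pattern with C11 atomics (claim_deferred() and the TCP_DEFERRED_ALL value are illustrative, not the kernel's):

    #include <stdatomic.h>

    #define TCP_DEFERRED_ALL 0x3cUL /* illustrative mask of deferred bits */

    /* Atomically snapshot the flags word, clear the deferred-action bits,
     * and retry if another CPU changed the word underneath us. Returns the
     * bits we claimed, or 0 if nothing was deferred. */
    static unsigned long claim_deferred(_Atomic unsigned long *tsq_flags)
    {
        unsigned long flags, nflags;

        do {
            flags = atomic_load(tsq_flags);
            if (!(flags & TCP_DEFERRED_ALL))
                return 0;
            nflags = flags & ~TCP_DEFERRED_ALL;
        } while (!atomic_compare_exchange_weak(tsq_flags, &flags, nflags));

        return flags & TCP_DEFERRED_ALL;
    }
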
860 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree() local
878 if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && in tcp_wfree()
879 !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { in tcp_wfree()
886 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
911 struct tcp_sock *tp; in tcp_transmit_skb() local
933 tp = tcp_sk(sk); in tcp_transmit_skb()
944 if (tcp_packets_in_flight(tp) == 0) in tcp_transmit_skb()
970 th->ack_seq = htonl(tp->rcv_nxt); in tcp_transmit_skb()
978 th->window = htons(min(tp->rcv_wnd, 65535U)); in tcp_transmit_skb()
986 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in tcp_transmit_skb()
987 if (before(tp->snd_up, tcb->seq + 0x10000)) { in tcp_transmit_skb()
988 th->urg_ptr = htons(tp->snd_up - tcb->seq); in tcp_transmit_skb()
990 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in tcp_transmit_skb()
996 tcp_options_write((__be32 *)(th + 1), tp, &opts); in tcp_transmit_skb()
1004 tp->af_specific->calc_md5_hash(opts.hash_location, in tcp_transmit_skb()
1015 tcp_event_data_sent(tp, sk); in tcp_transmit_skb()
1017 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in tcp_transmit_skb()
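
The urgent-pointer handling at 986-990 copes with th->urg_ptr being a 16-bit offset from the segment's own sequence number. A sketch of that clamping decision (urg_offset() is an illustrative helper; it returns -1 when URG should not be set on this segment):

    #include <stdint.h>

    static int seq_before(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s1 - s2) < 0;
    }

    /* If snd_up is within 0xFFFF of this segment's seq, carry the exact
     * offset; if it lies further ahead, pin the pointer at 0xFFFF, but
     * only while data that could reach it is still being sent (snd_nxt
     * has not yet passed seq + 0xFFFF). */
    static int urg_offset(uint32_t seq, uint32_t snd_up, uint32_t snd_nxt)
    {
        if (!seq_before(seq, snd_up))
            return -1;                      /* not in urgent mode range */
        if (seq_before(snd_up, seq + 0x10000))
            return (int)(uint16_t)(snd_up - seq);
        if (seq_before(snd_nxt, seq + 0xFFFF))  /* after(seq+0xFFFF, snd_nxt) */
            return 0xFFFF;
        return -1;
    }
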
1048 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb() local
1051 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
1087 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_fackets_out() local
1089 if (!tp->sacked_out || tcp_is_reno(tp)) in tcp_adjust_fackets_out()
1092 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) in tcp_adjust_fackets_out()
1093 tp->fackets_out -= decr; in tcp_adjust_fackets_out()
1101 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount() local
1103 tp->packets_out -= decr; in tcp_adjust_pcount()
1106 tp->sacked_out -= decr; in tcp_adjust_pcount()
1108 tp->retrans_out -= decr; in tcp_adjust_pcount()
1110 tp->lost_out -= decr; in tcp_adjust_pcount()
1113 if (tcp_is_reno(tp) && decr > 0) in tcp_adjust_pcount()
1114 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1118 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1119 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1120 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) in tcp_adjust_pcount()
1121 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1123 tcp_verify_left_out(tp); in tcp_adjust_pcount()
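
tcp_adjust_pcount() keeps the per-state segment counters consistent when an skb's segment count changes, and tcp_verify_left_out() (line 1123) asserts the invariant sacked_out + lost_out <= packets_out. A compressed sketch of that bookkeeping (the struct and flag values are stand-ins modeled on the kernel's TCPCB_* bits):

    #include <assert.h>
    #include <stdint.h>

    struct counters {
        uint32_t packets_out, sacked_out, retrans_out, lost_out;
    };

    /* Every counter covering the affected skb drops by the same segment
     * delta, then the left-out invariant must still hold. */
    static void adjust_pcount(struct counters *c, uint32_t sacked, int decr)
    {
        c->packets_out -= decr;
        if (sacked & 0x01)          /* cf. TCPCB_SACKED_ACKED */
            c->sacked_out -= decr;
        if (sacked & 0x02)          /* cf. TCPCB_SACKED_RETRANS */
            c->retrans_out -= decr;
        if (sacked & 0x04)          /* cf. TCPCB_LOST */
            c->lost_out -= decr;

        assert(c->sacked_out + c->lost_out <= c->packets_out);
    }
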
1149 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment() local
1215 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1298 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss() local
1316 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1317 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1339 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu() local
1344 tp->tcp_header_len + in tcp_mss_to_mtu()
1361 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init() local
1366 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1399 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss() local
1407 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1413 tp->mss_cache = mss_now; in tcp_sync_mss()
1424 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss() local
1431 mss_now = tp->mss_cache; in tcp_current_mss()
1445 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1446 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1459 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited() local
1464 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1465 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1466 if (win_used < tp->snd_cwnd) { in tcp_cwnd_application_limited()
1467 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1468 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; in tcp_cwnd_application_limited()
1470 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1472 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_application_limited()
1477 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate() local
1482 if (!before(tp->snd_una, tp->max_packets_seq) || in tcp_cwnd_validate()
1483 tp->packets_out > tp->max_packets_out) { in tcp_cwnd_validate()
1484 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1485 tp->max_packets_seq = tp->snd_nxt; in tcp_cwnd_validate()
1486 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1491 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1492 tp->snd_cwnd_stamp = tcp_time_stamp; in tcp_cwnd_validate()
1495 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1496 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1499 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) in tcp_cwnd_validate()
1505 static bool tcp_minshall_check(const struct tcp_sock *tp) in tcp_minshall_check() argument
1507 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1508 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
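
tcp_minshall_check() encodes Minshall's refinement of Nagle: hold back a new sub-MSS segment only while the previous small segment is still unacknowledged, i.e. while snd_sml lies in (snd_una, snd_nxt]. A standalone sketch (names are illustrative):

    #include <stdint.h>

    static int seq_after(uint32_t s1, uint32_t s2)
    {
        return (int32_t)(s2 - s1) < 0;
    }

    /* True while the end of the last small segment (snd_sml) is past
     * snd_una (not yet acked) but not past snd_nxt (actually sent). */
    static int small_segment_in_flight(uint32_t snd_sml, uint32_t snd_una,
                                       uint32_t snd_nxt)
    {
        return seq_after(snd_sml, snd_una) && !seq_after(snd_sml, snd_nxt);
    }
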
1519 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1523 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1533 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument
1538 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
1568 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point() local
1571 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1587 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point()
1596 static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in tcp_cwnd_test() argument
1606 in_flight = tcp_packets_in_flight(tp); in tcp_cwnd_test()
1607 cwnd = tp->snd_cwnd; in tcp_cwnd_test()
1638 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1651 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1654 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
1661 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, in tcp_snd_wnd_test() argument
1670 return !after(end_seq, tcp_wnd_end(tp)); in tcp_snd_wnd_test()
1680 const struct tcp_sock *tp = tcp_sk(sk); in tcp_snd_test() local
1685 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) in tcp_snd_test()
1688 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_snd_test()
1689 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) in tcp_snd_test()
1698 const struct tcp_sock *tp = tcp_sk(sk); in tcp_may_send_now() local
1704 tp->nonagle : TCP_NAGLE_PUSH)); in tcp_may_send_now()
1772 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer() local
1786 if ((s32)(tcp_time_stamp - tp->lsndtime) > 0) in tcp_tso_should_defer()
1789 in_flight = tcp_packets_in_flight(tp); in tcp_tso_should_defer()
1791 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); in tcp_tso_should_defer()
1793 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1796 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
1801 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
1810 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); in tcp_tso_should_defer()
1824 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
1832 if (age < (tp->srtt_us >> 4)) in tcp_tso_should_defer()
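
The tcp_tso_should_defer() excerpt computes how much could go out now as the minimum of the receive-window headroom and the unused congestion window (lines 1793-1801), and sends immediately if that already covers a maximal TSO burst. A simplified sketch of just that comparison (should_defer() is illustrative and omits the function's other exit conditions, such as the srtt-based age check at line 1832):

    #include <stdint.h>

    /* Assumes cwnd > in_flight, as the BUG_ON at line 1791 enforces. */
    static int should_defer(uint32_t send_win, uint32_t cwnd,
                            uint32_t in_flight, uint32_t mss,
                            uint32_t max_segs)
    {
        uint32_t cong_win = (cwnd - in_flight) * mss;
        uint32_t limit = send_win < cong_win ? send_win : cong_win;

        if (limit >= max_segs * mss)
            return 0;   /* enough headroom for a full burst: send now */
        return 1;       /* candidate for deferral, pending other checks */
    }
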
1849 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe() local
1861 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
1882 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe() local
1900 tp->snd_cwnd < 11 || in tcp_mtu_probe()
1901 tp->rx_opt.num_sacks || tp->rx_opt.dsack) in tcp_mtu_probe()
1911 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
1927 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
1930 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
1932 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
1936 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { in tcp_mtu_probe()
1937 if (!tcp_packets_in_flight(tp)) in tcp_mtu_probe()
2005 tp->snd_cwnd--; in tcp_mtu_probe()
2009 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2010 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
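
tcp_mtu_probe() only launches a probe when enough data is queued and windowed: line 1911 reserves probe_size plus (reordering + 1) full segments so a lost probe can still be detected via SACK. A sketch of those admission checks (can_probe() is an illustrative reduction; the real function also checks cwnd, SACK state, and the window's right edge):

    #include <stdint.h>

    static int can_probe(uint32_t probe_size, uint32_t reordering,
                         uint32_t mss, uint32_t queued_unsent,
                         uint32_t snd_wnd)
    {
        /* The probe plus enough trailing segments to flush out a loss. */
        uint32_t size_needed = probe_size + (reordering + 1) * mss;

        if (queued_unsent < size_needed)
            return 0;   /* not enough unsent data queued yet */
        if (snd_wnd < size_needed)
            return 0;   /* receiver window cannot cover the probe */
        return 1;
    }
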
2035 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit() local
2062 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2068 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2078 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
2082 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2094 if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) in tcp_write_xmit()
2119 set_bit(TSQ_THROTTLED, &tp->tsq_flags); in tcp_write_xmit()
2138 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2147 tp->prr_out += sent_pkts; in tcp_write_xmit()
2155 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); in tcp_write_xmit()
2161 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe() local
2163 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); in tcp_schedule_loss_probe()
2185 if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out || in tcp_schedule_loss_probe()
2186 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) in tcp_schedule_loss_probe()
2189 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && in tcp_schedule_loss_probe()
2197 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2237 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe() local
2249 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2278 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2376 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window() local
2386 int full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
2396 tp->rcv_ssthresh = min(tp->rcv_ssthresh, in __tcp_select_window()
2397 4U * tp->advmss); in __tcp_select_window()
2402 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
2415 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
2416 free_space = tp->rcv_ssthresh; in __tcp_select_window()
2421 window = tp->rcv_wnd; in __tcp_select_window()
2422 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
2429 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) in __tcp_select_window()
2430 window = (((window >> tp->rx_opt.rcv_wscale) + 1) in __tcp_select_window()
2431 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
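
The tail of __tcp_select_window() (lines 2429-2431) rounds the kept window up to a multiple of 1 << rcv_wscale, since the scaled wire format cannot represent anything finer and rounding down would silently shrink the advertisement. An equivalent sketch in plain arithmetic (round_window() is an illustrative name):

    #include <stdint.h>

    /* Round window up to the next multiple of 1 << rcv_wscale; identical
     * to the kernel's shift-based test-and-bump, written with division. */
    static uint32_t round_window(uint32_t window, int rcv_wscale)
    {
        uint32_t unit = 1u << rcv_wscale;

        if (window % unit)
            window = (window / unit + 1) * unit;
        return window;
    }
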
2454 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans() local
2488 tcp_clear_retrans_hints_partial(tp); in tcp_collapse_retrans()
2489 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
2490 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
2522 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse() local
2550 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2563 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb() local
2583 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2584 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in __tcp_retransmit_skb()
2586 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2600 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2601 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2644 tp->total_retrans++; in __tcp_retransmit_skb()
2651 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb() local
2660 if (!tp->retrans_out) in tcp_retransmit_skb()
2661 tp->lost_retrans_low = tp->snd_nxt; in tcp_retransmit_skb()
2663 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2666 if (!tp->retrans_stamp) in tcp_retransmit_skb()
2667 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
2672 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; in tcp_retransmit_skb()
2677 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
2678 tp->undo_retrans = 0; in tcp_retransmit_skb()
2679 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2689 const struct tcp_sock *tp = tcp_sk(sk); in tcp_can_forward_retransmit() local
2696 if (tcp_is_reno(tp)) in tcp_can_forward_retransmit()
2724 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue() local
2731 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
2734 if (!tp->lost_out) in tcp_xmit_retransmit_queue()
2735 tp->retransmit_high = tp->snd_una; in tcp_xmit_retransmit_queue()
2737 if (tp->retransmit_skb_hint) { in tcp_xmit_retransmit_queue()
2738 skb = tp->retransmit_skb_hint; in tcp_xmit_retransmit_queue()
2740 if (after(last_lost, tp->retransmit_high)) in tcp_xmit_retransmit_queue()
2741 last_lost = tp->retransmit_high; in tcp_xmit_retransmit_queue()
2744 last_lost = tp->snd_una; in tcp_xmit_retransmit_queue()
2754 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
2763 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) in tcp_xmit_retransmit_queue()
2768 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) in tcp_xmit_retransmit_queue()
2772 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { in tcp_xmit_retransmit_queue()
2773 tp->retransmit_high = last_lost; in tcp_xmit_retransmit_queue()
2806 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
2837 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin() local
2848 tp->write_seq++; in tcp_send_fin()
2856 tp->snd_nxt++; in tcp_send_fin()
2869 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
2954 struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack() local
2972 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) in tcp_make_synack()
2973 mss = tp->rx_opt.user_mss; in tcp_make_synack()
3012 tcp_options_write((__be32 *)(th + 1), tp, &opts); in tcp_make_synack()
3053 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init() local
3059 tp->tcp_header_len = sizeof(struct tcphdr) + in tcp_connect_init()
3063 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3064 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; in tcp_connect_init()
3068 if (tp->rx_opt.user_mss) in tcp_connect_init()
3069 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3070 tp->max_window = 0; in tcp_connect_init()
3076 if (!tp->window_clamp) in tcp_connect_init()
3077 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); in tcp_connect_init()
3078 tp->advmss = dst_metric_advmss(dst); in tcp_connect_init()
3079 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) in tcp_connect_init()
3080 tp->advmss = tp->rx_opt.user_mss; in tcp_connect_init()
3086 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3087 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3090 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3091 &tp->rcv_wnd, in tcp_connect_init()
3092 &tp->window_clamp, in tcp_connect_init()
3097 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3098 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3102 tp->snd_wnd = 0; in tcp_connect_init()
3103 tcp_init_wl(tp, 0); in tcp_connect_init()
3104 tp->snd_una = tp->write_seq; in tcp_connect_init()
3105 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3106 tp->snd_up = tp->write_seq; in tcp_connect_init()
3107 tp->snd_nxt = tp->write_seq; in tcp_connect_init()
3109 if (likely(!tp->repair)) in tcp_connect_init()
3110 tp->rcv_nxt = 0; in tcp_connect_init()
3112 tp->rcv_tstamp = tcp_time_stamp; in tcp_connect_init()
3113 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3114 tp->copied_seq = tp->rcv_nxt; in tcp_connect_init()
3118 tcp_clear_retrans(tp); in tcp_connect_init()
3123 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb() local
3131 tp->write_seq = tcb->end_seq; in tcp_connect_queue_skb()
3132 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3144 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data() local
3145 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3150 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3151 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, in tcp_send_syn_data()
3169 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) in tcp_send_syn_data()
3170 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_send_syn_data()
3215 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
3226 tp->syn_fastopen = 0; in tcp_send_syn_data()
3235 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect() local
3241 if (unlikely(tp->repair)) { in tcp_connect()
3250 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
3251 tp->retrans_stamp = tcp_time_stamp; in tcp_connect()
3256 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3264 tp->snd_nxt = tp->write_seq; in tcp_connect()
3265 tp->pushed_seq = tp->write_seq; in tcp_connect()
3288 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack() local
3301 if (tp->srtt_us) { in tcp_send_delayed_ack()
3302 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
3389 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb() local
3403 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
3419 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup() local
3426 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3429 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3431 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3432 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3453 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
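
The check at line 3453 uses the kernel's between(seq1, seq2, seq3) helper, which tests seq2 <= seq1 <= seq3 entirely in mod-2^32 arithmetic, so a single unsigned subtraction handles wrap-around. A sketch of that helper (seq_between() is an illustrative name for the same expression):

    #include <stdint.h>

    /* True when seq lies in [low, high], evaluated mod 2^32: the distance
     * from low to seq must not exceed the distance from low to high. */
    static int seq_between(uint32_t seq, uint32_t low, uint32_t high)
    {
        return high - low >= seq - low;
    }
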
3465 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0() local
3471 if (tp->packets_out || !tcp_send_head(sk)) { in tcp_send_probe0()