Lines matching refs: sk — net/ipv4/tcp_output.c
68 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
74 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
75 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent()
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
84 tcp_rearm_rto(sk); in tcp_event_new_data_sent()
87 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, in tcp_event_new_data_sent()
97 static inline __u32 tcp_acceptable_seq(const struct sock *sk) in tcp_acceptable_seq() argument
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq()
121 static __u16 tcp_advertise_mss(struct sock *sk) in tcp_advertise_mss() argument
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss()
124 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_advertise_mss()
142 void tcp_cwnd_restart(struct sock *sk, s32 delta) in tcp_cwnd_restart() argument
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart()
145 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
148 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
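The loop visible above in tcp_cwnd_restart() halves the congestion window once per RTO of elapsed idle time, never letting it fall below the restart window. A minimal user-space model of that arithmetic follows; the parameter names (cwnd, restart_cwnd, rto_ms, idle_ms) are illustrative stand-ins, not kernel API.

    #include <stdio.h>

    /* Model of the halving loop in tcp_cwnd_restart(): the window
     * is halved once per RTO of idle time, clamped so it never
     * drops below the restart window. */
    static unsigned int cwnd_after_idle(unsigned int cwnd,
                                        unsigned int restart_cwnd,
                                        int rto_ms, int idle_ms)
    {
        int delta = idle_ms;

        while ((delta -= rto_ms) > 0 && cwnd > restart_cwnd)
            cwnd >>= 1;             /* halve once per elapsed RTO */
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
        /* e.g. cwnd 40, restart window 10, RTO 200 ms, idle 1 s */
        printf("%u\n", cwnd_after_idle(40, 10, 200, 1000)); /* -> 10 */
        return 0;
    }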
162 struct sock *sk) in tcp_event_data_sent() argument
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
168 tcp_ca_event(sk, CA_EVENT_TX_START); in tcp_event_data_sent()
180 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) in tcp_event_ack_sent() argument
182 tcp_dec_quickack_mode(sk, pkts); in tcp_event_ack_sent()
183 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in tcp_event_ack_sent()
266 static u16 tcp_select_window(struct sock *sk) in tcp_select_window() argument
268 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window()
271 u32 new_win = __tcp_select_window(sk); in tcp_select_window()
283 NET_INC_STATS(sock_net(sk), in tcp_select_window()
305 NET_INC_STATS(sock_net(sk), in tcp_select_window()
308 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); in tcp_select_window()
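The NET_INC_STATS calls above show tcp_select_window() counting transitions into and out of a zero advertised window (LINUX_MIB_TCPTOZEROWINDOWADV / LINUX_MIB_TCPFROMZEROWINDOWADV). A sketch of just that edge detection; the actual window computation in __tcp_select_window() is deliberately elided.

    #include <stdio.h>

    /* Edge detection as in tcp_select_window(): count transitions
     * into and out of a zero advertised window. */
    struct win_stats { unsigned to_zero, from_zero; };

    static void account_window(struct win_stats *st,
                               unsigned old_win, unsigned new_win)
    {
        if (new_win == 0 && old_win != 0)
            st->to_zero++;          /* LINUX_MIB_TCPTOZEROWINDOWADV */
        else if (new_win != 0 && old_win == 0)
            st->from_zero++;        /* LINUX_MIB_TCPFROMZEROWINDOWADV */
    }

    int main(void)
    {
        struct win_stats st = {0, 0};
        unsigned trace[] = { 65535, 1460, 0, 0, 2920 };

        for (int i = 1; i < 5; i++)
            account_window(&st, trace[i - 1], trace[i]);
        printf("to_zero=%u from_zero=%u\n", st.to_zero, st.from_zero);
        return 0;
    }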
315 static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_synack() argument
317 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack()
322 else if (tcp_ca_needs_ecn(sk)) in tcp_ecn_send_synack()
323 INET_ECN_xmit(sk); in tcp_ecn_send_synack()
327 static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_send_syn() argument
329 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn()
330 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || in tcp_ecn_send_syn()
331 tcp_ca_needs_ecn(sk); in tcp_ecn_send_syn()
334 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_ecn_send_syn()
345 if (tcp_ca_needs_ecn(sk)) in tcp_ecn_send_syn()
346 INET_ECN_xmit(sk); in tcp_ecn_send_syn()
350 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) in tcp_ecn_clear_syn() argument
352 if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) in tcp_ecn_clear_syn()
369 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, in tcp_ecn_send() argument
372 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send()
378 INET_ECN_xmit(sk); in tcp_ecn_send()
384 } else if (!tcp_ca_needs_ecn(sk)) { in tcp_ecn_send()
386 INET_ECN_dontxmit(sk); in tcp_ecn_send()
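The tcp_ecn_send_syn() entries above show the decision for offering ECN on an outgoing SYN: either sysctl_tcp_ecn is exactly 1, or the congestion-control module requires ECN. A hedged sketch of that predicate; the dst-based override visible at source line 334 is omitted, and the inputs here are plain values rather than kernel state.

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the use_ecn decision in tcp_ecn_send_syn(). */
    static bool syn_offers_ecn(int sysctl_tcp_ecn, bool ca_needs_ecn)
    {
        return sysctl_tcp_ecn == 1 || ca_needs_ecn;
    }

    int main(void)
    {
        printf("%d\n", syn_offers_ecn(1, false)); /* 1: sysctl requests ECN */
        printf("%d\n", syn_offers_ecn(0, true));  /* 1: CA (e.g. DCTCP) needs it */
        printf("%d\n", syn_offers_ecn(0, false)); /* 0: ECN stays off */
        return 0;
    }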
545 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
549 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options()
554 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
572 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
673 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
677 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options()
684 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
735 static void tcp_tsq_handler(struct sock *sk) in tcp_tsq_handler() argument
737 if ((1 << sk->sk_state) & in tcp_tsq_handler()
740 tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle, in tcp_tsq_handler()
756 struct sock *sk; in tcp_tasklet_func() local
766 sk = (struct sock *)tp; in tcp_tasklet_func()
767 bh_lock_sock(sk); in tcp_tasklet_func()
769 if (!sock_owned_by_user(sk)) { in tcp_tasklet_func()
770 tcp_tsq_handler(sk); in tcp_tasklet_func()
775 bh_unlock_sock(sk); in tcp_tasklet_func()
778 sk_free(sk); in tcp_tasklet_func()
793 void tcp_release_cb(struct sock *sk) in tcp_release_cb() argument
795 struct tcp_sock *tp = tcp_sk(sk); in tcp_release_cb()
807 tcp_tsq_handler(sk); in tcp_release_cb()
818 sock_release_ownership(sk); in tcp_release_cb()
821 tcp_write_timer_handler(sk); in tcp_release_cb()
822 __sock_put(sk); in tcp_release_cb()
825 tcp_delack_timer_handler(sk); in tcp_release_cb()
826 __sock_put(sk); in tcp_release_cb()
829 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
830 __sock_put(sk); in tcp_release_cb()
856 struct sock *sk = skb->sk; in tcp_wfree() local
857 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree()
863 wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc); in tcp_wfree()
889 sk_free(sk); in tcp_wfree()
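The entries from tcp_tsq_handler() through tcp_wfree() above are the TCP Small Queues plumbing: tcp_wfree() runs from skb destructors, queues the socket on a per-CPU tasklet, and tcp_tasklet_func() either handles the socket immediately (bh_lock_sock + !sock_owned_by_user) or leaves a deferred flag that tcp_release_cb() drains when the owning thread releases the lock. A compressed model of that "do it now or defer to the lock holder" pattern; every name here (fake_sock, deferred, TSQ_DEFERRED) is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Defer-to-lock-holder pattern used by TSQ: the tasklet does
     * the work only if nobody owns the socket; otherwise it sets a
     * flag that the release callback consumes at unlock time. */
    struct fake_sock {
        bool owned_by_user;     /* models sock_owned_by_user() */
        unsigned deferred;      /* models the tsq_flags deferrals */
    };
    #define TSQ_DEFERRED 0x1

    static void tsq_handler(struct fake_sock *sk)
    {
        (void)sk;
        printf("running tcp_tsq_handler-equivalent\n");
    }

    /* tasklet side: run now, or mark deferred */
    static void tasklet_func(struct fake_sock *sk)
    {
        if (!sk->owned_by_user)
            tsq_handler(sk);
        else
            sk->deferred |= TSQ_DEFERRED;
    }

    /* release_sock() side: drain anything that was deferred */
    static void release_cb(struct fake_sock *sk)
    {
        if (sk->deferred & TSQ_DEFERRED) {
            sk->deferred &= ~TSQ_DEFERRED;
            tsq_handler(sk);
        }
    }

    int main(void)
    {
        struct fake_sock sk = { .owned_by_user = true };
        tasklet_func(&sk);   /* owner busy: work is deferred */
        sk.owned_by_user = false;
        release_cb(&sk);     /* owner unlocks: deferred work runs */
        return 0;
    }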
903 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
906 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_transmit_skb()
929 inet = inet_sk(sk); in tcp_transmit_skb()
930 tp = tcp_sk(sk); in tcp_transmit_skb()
935 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in tcp_transmit_skb()
937 tcp_options_size = tcp_established_options(sk, skb, &opts, in tcp_transmit_skb()
948 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1); in tcp_transmit_skb()
954 skb->sk = sk; in tcp_transmit_skb()
956 skb_set_hash_from_sk(skb, sk); in tcp_transmit_skb()
957 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in tcp_transmit_skb()
974 th->window = htons(tcp_select_window(sk)); in tcp_transmit_skb()
991 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in tcp_transmit_skb()
993 tcp_ecn_send(sk, skb, tcp_header_size); in tcp_transmit_skb()
998 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in tcp_transmit_skb()
1000 md5, sk, skb); in tcp_transmit_skb()
1004 icsk->icsk_af_ops->send_check(sk, skb); in tcp_transmit_skb()
1007 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); in tcp_transmit_skb()
1010 tcp_event_data_sent(tp, sk); in tcp_transmit_skb()
1013 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, in tcp_transmit_skb()
1028 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in tcp_transmit_skb()
1033 tcp_enter_cwr(sk); in tcp_transmit_skb()
1043 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
1045 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb()
1050 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
1051 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
1052 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1073 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, in tcp_adjust_fackets_out() argument
1076 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_fackets_out()
1088 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1090 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount()
1105 tcp_adjust_fackets_out(sk, skb, decr); in tcp_adjust_pcount()
1135 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, in tcp_fragment() argument
1138 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment()
1155 buff = sk_stream_alloc_skb(sk, nsize, gfp, true); in tcp_fragment()
1159 sk->sk_wmem_queued += buff->truesize; in tcp_fragment()
1160 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1209 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1214 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
1262 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1273 sk->sk_wmem_queued -= len; in tcp_trim_head()
1274 sk_mem_uncharge(sk, len); in tcp_trim_head()
1275 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); in tcp_trim_head()
1285 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) in __tcp_mtu_to_mss() argument
1287 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss()
1288 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1298 const struct dst_entry *dst = __sk_dst_get(sk); in __tcp_mtu_to_mss()
1318 int tcp_mtu_to_mss(struct sock *sk, int pmtu) in tcp_mtu_to_mss() argument
1321 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1322 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
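The __tcp_mtu_to_mss()/tcp_mtu_to_mss()/tcp_mss_to_mtu() entries above convert between path MTU and MSS in both directions, with tcp_mtu_to_mss() additionally subtracting the TCP options in use beyond the base header. A back-of-the-envelope version for plain IPv4 with timestamps; the header sizes are the textbook constants (20-byte IP, 20-byte TCP, 12-byte padded timestamp option), not values read from the kernel.

    #include <stdio.h>

    enum {
        IP4_HDR = 20,  /* IPv4 header, no options */
        TCP_HDR = 20,  /* sizeof(struct tcphdr) */
        TS_OPT  = 12,  /* TCP timestamp option, padded */
    };

    /* Mirror of the tcp_mtu_to_mss() arithmetic: strip the network
     * header, the base TCP header, then the extra option bytes. */
    static int mtu_to_mss(int pmtu, int tcp_opt_len)
    {
        return pmtu - IP4_HDR - TCP_HDR - tcp_opt_len;
    }

    /* Inverse direction, as in tcp_mss_to_mtu(). */
    static int mss_to_mtu(int mss, int tcp_opt_len)
    {
        return mss + tcp_opt_len + TCP_HDR + IP4_HDR;
    }

    int main(void)
    {
        int mss = mtu_to_mss(1500, TS_OPT);
        printf("MTU 1500 -> MSS %d\n", mss);   /* the familiar 1448 */
        printf("MSS %d -> MTU %d\n", mss, mss_to_mtu(mss, TS_OPT));
        return 0;
    }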
1326 int tcp_mss_to_mtu(struct sock *sk, int mss) in tcp_mss_to_mtu() argument
1328 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu()
1329 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1339 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_mss_to_mtu()
1348 void tcp_mtup_init(struct sock *sk) in tcp_mtup_init() argument
1350 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init()
1351 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1352 struct net *net = sock_net(sk); in tcp_mtup_init()
1357 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); in tcp_mtup_init()
1386 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) in tcp_sync_mss() argument
1388 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss()
1389 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1395 mss_now = tcp_mtu_to_mss(sk, pmtu); in tcp_sync_mss()
1401 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1411 unsigned int tcp_current_mss(struct sock *sk) in tcp_current_mss() argument
1413 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss()
1414 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_current_mss()
1424 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1425 mss_now = tcp_sync_mss(sk, mtu); in tcp_current_mss()
1428 header_len = tcp_established_options(sk, NULL, &opts, &md5) + in tcp_current_mss()
1446 static void tcp_cwnd_application_limited(struct sock *sk) in tcp_cwnd_application_limited() argument
1448 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited()
1450 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1451 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1453 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1456 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1464 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) in tcp_cwnd_validate() argument
1466 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate()
1478 if (tcp_is_cwnd_limited(sk)) { in tcp_cwnd_validate()
1488 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) in tcp_cwnd_validate()
1489 tcp_cwnd_application_limited(sk); in tcp_cwnd_validate()
1533 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now) in tcp_tso_autosize() argument
1537 bytes = min(sk->sk_pacing_rate >> 10, in tcp_tso_autosize()
1538 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); in tcp_tso_autosize()
1547 return min_t(u32, segs, sk->sk_gso_max_segs); in tcp_tso_autosize()
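tcp_tso_autosize() above caps a TSO burst at roughly one millisecond of the socket's pacing rate: sk_pacing_rate is bytes per second, so the `>> 10` approximates bytes per millisecond; the byte budget is then converted to segments and clamped by gso_max_segs. A sketch of that sizing, assuming the sysctl_tcp_min_tso_segs default of 2 and a rough MAX_TCP_HEADER placeholder (the real value depends on kernel config).

    #include <stdio.h>

    #define MAX_TCP_HEADER 320   /* rough placeholder; config-dependent */
    #define MIN_TSO_SEGS   2     /* sysctl_tcp_min_tso_segs default */

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
    static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

    /* Sketch of tcp_tso_autosize(): budget ~1 ms of pacing rate
     * (rate >> 10 ~= bytes per millisecond), convert to segments,
     * clamp between the minimum TSO size and gso_max_segs. */
    static unsigned tso_autosize(unsigned pacing_rate_Bps,
                                 unsigned gso_max_size,
                                 unsigned gso_max_segs,
                                 unsigned mss)
    {
        unsigned bytes = min_u(pacing_rate_Bps >> 10,
                               gso_max_size - 1 - MAX_TCP_HEADER);
        unsigned segs  = max_u(bytes / mss, MIN_TSO_SEGS);

        return min_u(segs, gso_max_segs);
    }

    int main(void)
    {
        /* ~100 Mbit/s pacing, 64 KB GSO limit, MSS 1448 */
        printf("%u segs\n", tso_autosize(12500000, 65536, 64, 1448));
        return 0;
    }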
1551 static unsigned int tcp_mss_split_point(const struct sock *sk, in tcp_mss_split_point() argument
1557 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point()
1563 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1665 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, in tcp_snd_test() argument
1668 const struct tcp_sock *tp = tcp_sk(sk); in tcp_snd_test()
1684 bool tcp_may_send_now(struct sock *sk) in tcp_may_send_now() argument
1686 const struct tcp_sock *tp = tcp_sk(sk); in tcp_may_send_now()
1687 struct sk_buff *skb = tcp_send_head(sk); in tcp_may_send_now()
1690 tcp_snd_test(sk, skb, tcp_current_mss(sk), in tcp_may_send_now()
1691 (tcp_skb_is_last(sk, skb) ? in tcp_may_send_now()
1702 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1711 return tcp_fragment(sk, skb, len, mss_now, gfp); in tso_fragment()
1713 buff = sk_stream_alloc_skb(sk, 0, gfp, true); in tso_fragment()
1717 sk->sk_wmem_queued += buff->truesize; in tso_fragment()
1718 sk_mem_charge(sk, buff->truesize); in tso_fragment()
1745 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
1755 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in tcp_tso_should_defer() argument
1758 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_tso_should_defer()
1760 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer()
1793 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1816 head = tcp_write_queue_head(sk); in tcp_tso_should_defer()
1834 static inline void tcp_mtu_check_reprobe(struct sock *sk) in tcp_mtu_check_reprobe() argument
1836 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_check_reprobe()
1837 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe()
1838 struct net *net = sock_net(sk); in tcp_mtu_check_reprobe()
1845 int mss = tcp_current_mss(sk); in tcp_mtu_check_reprobe()
1852 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
1868 static int tcp_mtu_probe(struct sock *sk) in tcp_mtu_probe() argument
1870 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe()
1871 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtu_probe()
1873 struct net *net = sock_net(sk); in tcp_mtu_probe()
1887 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
1896 mss_now = tcp_current_mss(sk); in tcp_mtu_probe()
1897 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
1905 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
1910 tcp_mtu_check_reprobe(sk); in tcp_mtu_probe()
1932 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); in tcp_mtu_probe()
1935 sk->sk_wmem_queued += nskb->truesize; in tcp_mtu_probe()
1936 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
1938 skb = tcp_send_head(sk); in tcp_mtu_probe()
1947 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
1950 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
1963 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
1964 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
1990 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { in tcp_mtu_probe()
1994 tcp_event_new_data_sent(sk, nskb); in tcp_mtu_probe()
1996 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
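tcp_mtu_probe() above picks its probe size from the midpoint of [search_low, search_high] (source line 1897) and, on a successful probe, raises the floor (line 1996). A sketch of the binary-search convergence this produces; note the failure direction (shrinking search_high) lives in the loss-handling path elsewhere in the kernel, and is folded into one loop here purely for illustration. probe_succeeds() stands in for the real in-flight probe and its ACK.

    #include <stdbool.h>
    #include <stdio.h>

    /* Binary search over path MTU, modeled on tcp_mtu_probe():
     * each round probes the midpoint of [search_low, search_high];
     * an ACKed probe raises the floor, a lost one lowers the ceiling. */
    static bool probe_succeeds(int mtu, int true_path_mtu)
    {
        return mtu <= true_path_mtu;
    }

    int main(void)
    {
        int search_low = 512, search_high = 1500;
        int true_path_mtu = 1400;       /* unknown to the prober */

        while (search_high - search_low > 8) {  /* arbitrary stop width */
            int probe = (search_low + search_high) / 2;

            if (probe_succeeds(probe, true_path_mtu))
                search_low = probe;     /* raise the floor */
            else
                search_high = probe;    /* lower the ceiling */
        }
        printf("converged near %d\n", search_low);
        return 0;
    }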
2020 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, in tcp_write_xmit() argument
2023 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit()
2035 result = tcp_mtu_probe(sk); in tcp_write_xmit()
2043 max_segs = tcp_tso_autosize(sk, mss_now); in tcp_write_xmit()
2044 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
2070 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
2075 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, in tcp_write_xmit()
2082 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
2089 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
2102 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); in tcp_write_xmit()
2105 if (atomic_read(&sk->sk_wmem_alloc) > limit) { in tcp_write_xmit()
2112 if (atomic_read(&sk->sk_wmem_alloc) > limit) in tcp_write_xmit()
2116 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2123 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
2133 if (tcp_in_cwnd_reduction(sk)) in tcp_write_xmit()
2138 tcp_schedule_loss_probe(sk); in tcp_write_xmit()
2140 tcp_cwnd_validate(sk, is_cwnd_limited); in tcp_write_xmit()
2143 return !tp->packets_out && tcp_send_head(sk); in tcp_write_xmit()
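Inside the tcp_write_xmit() loop above (source line 2102), TCP Small Queues bounds how much data may sit unsent below the socket in the qdisc/driver: the limit is the larger of two skb truesizes and roughly 1 ms of pacing rate, and transmission pauses once sk_wmem_alloc exceeds it. A sketch of that gate with plain numeric inputs in place of kernel state.

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

    /* TSQ gate from tcp_write_xmit(): keep sending only while
     * bytes already queued below the socket stay under
     * max(2 * skb->truesize, ~1 ms of pacing rate). */
    static bool tsq_allows_send(unsigned wmem_alloc,
                                unsigned skb_truesize,
                                unsigned pacing_rate_Bps)
    {
        unsigned limit = max_u(2 * skb_truesize, pacing_rate_Bps >> 10);

        return wmem_alloc <= limit;
    }

    int main(void)
    {
        /* 1 Gbit/s pacing -> ~122 KB budget; 4 KB skbs */
        printf("%d\n", tsq_allows_send(60000,  4096, 125000000)); /* 1 */
        printf("%d\n", tsq_allows_send(200000, 4096, 125000000)); /* 0 */
        return 0;
    }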
2146 bool tcp_schedule_loss_probe(struct sock *sk) in tcp_schedule_loss_probe() argument
2148 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_schedule_loss_probe()
2149 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe()
2157 tcp_rearm_rto(sk); in tcp_schedule_loss_probe()
2174 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) in tcp_schedule_loss_probe()
2178 tcp_send_head(sk)) in tcp_schedule_loss_probe()
2193 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; in tcp_schedule_loss_probe()
2200 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, in tcp_schedule_loss_probe()
2210 static bool skb_still_in_host_queue(const struct sock *sk, in skb_still_in_host_queue() argument
2213 if (unlikely(skb_fclone_busy(sk, skb))) { in skb_still_in_host_queue()
2214 NET_INC_STATS_BH(sock_net(sk), in skb_still_in_host_queue()
2224 void tcp_send_loss_probe(struct sock *sk) in tcp_send_loss_probe() argument
2226 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe()
2229 int mss = tcp_current_mss(sk); in tcp_send_loss_probe()
2231 skb = tcp_send_head(sk); in tcp_send_loss_probe()
2235 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); in tcp_send_loss_probe()
2240 skb = tcp_write_queue_prev(sk, skb); in tcp_send_loss_probe()
2242 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2253 if (skb_still_in_host_queue(sk, skb)) in tcp_send_loss_probe()
2261 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2264 skb = tcp_write_queue_next(sk, skb); in tcp_send_loss_probe()
2270 if (__tcp_retransmit_skb(sk, skb)) in tcp_send_loss_probe()
2277 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); in tcp_send_loss_probe()
2279 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2281 tcp_rearm_rto(sk); in tcp_send_loss_probe()
2288 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, in __tcp_push_pending_frames() argument
2295 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2298 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, in __tcp_push_pending_frames()
2299 sk_gfp_atomic(sk, GFP_ATOMIC))) in __tcp_push_pending_frames()
2300 tcp_check_probe_timer(sk); in __tcp_push_pending_frames()
2306 void tcp_push_one(struct sock *sk, unsigned int mss_now) in tcp_push_one() argument
2308 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one()
2312 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2367 u32 __tcp_select_window(struct sock *sk) in __tcp_select_window() argument
2369 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_select_window()
2370 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window()
2378 int free_space = tcp_space(sk); in __tcp_select_window()
2379 int allowed_space = tcp_full_space(sk); in __tcp_select_window()
2389 if (tcp_under_memory_pressure(sk)) in __tcp_select_window()
2446 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2448 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans()
2449 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); in tcp_collapse_retrans()
2457 tcp_highest_sack_combine(sk, next_skb, skb); in tcp_collapse_retrans()
2459 tcp_unlink_write_queue(next_skb, sk); in tcp_collapse_retrans()
2486 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); in tcp_collapse_retrans()
2488 sk_wmem_free_skb(sk, next_skb); in tcp_collapse_retrans()
2492 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2501 if (skb == tcp_send_head(sk)) in tcp_can_collapse()
2513 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, in tcp_retrans_try_collapse() argument
2516 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse()
2525 tcp_for_write_queue_from_safe(skb, tmp, sk) { in tcp_retrans_try_collapse()
2526 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2547 tcp_collapse_retrans(sk, to); in tcp_retrans_try_collapse()
2555 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in __tcp_retransmit_skb() argument
2557 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb()
2558 struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_retransmit_skb()
2570 if (atomic_read(&sk->sk_wmem_alloc) > in __tcp_retransmit_skb()
2571 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) in __tcp_retransmit_skb()
2574 if (skb_still_in_host_queue(sk, skb)) in __tcp_retransmit_skb()
2580 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2584 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
2587 cur_mss = tcp_current_mss(sk); in __tcp_retransmit_skb()
2599 if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC)) in __tcp_retransmit_skb()
2608 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); in __tcp_retransmit_skb()
2614 tcp_ecn_clear_syn(sk, skb); in __tcp_retransmit_skb()
2616 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2632 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : in __tcp_retransmit_skb()
2635 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
2641 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); in __tcp_retransmit_skb()
2643 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in __tcp_retransmit_skb()
2649 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_retransmit_skb() argument
2651 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb()
2652 int err = __tcp_retransmit_skb(sk, skb); in tcp_retransmit_skb()
2668 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); in tcp_retransmit_skb()
2680 static bool tcp_can_forward_retransmit(struct sock *sk) in tcp_can_forward_retransmit() argument
2682 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_can_forward_retransmit()
2683 const struct tcp_sock *tp = tcp_sk(sk); in tcp_can_forward_retransmit()
2701 if (tcp_may_send_now(sk)) in tcp_can_forward_retransmit()
2715 void tcp_xmit_retransmit_queue(struct sock *sk) in tcp_xmit_retransmit_queue() argument
2717 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_xmit_retransmit_queue()
2718 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue()
2737 skb = tcp_write_queue_head(sk); in tcp_xmit_retransmit_queue()
2741 tcp_for_write_queue_from(skb, sk) { in tcp_xmit_retransmit_queue()
2744 if (skb == tcp_send_head(sk)) in tcp_xmit_retransmit_queue()
2768 if (!tcp_can_forward_retransmit(sk)) in tcp_xmit_retransmit_queue()
2794 if (tcp_retransmit_skb(sk, skb)) in tcp_xmit_retransmit_queue()
2797 NET_INC_STATS_BH(sock_net(sk), mib_idx); in tcp_xmit_retransmit_queue()
2799 if (tcp_in_cwnd_reduction(sk)) in tcp_xmit_retransmit_queue()
2802 if (skb == tcp_write_queue_head(sk)) in tcp_xmit_retransmit_queue()
2803 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_xmit_retransmit_queue()
2804 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
2816 void sk_forced_mem_schedule(struct sock *sk, int size) in sk_forced_mem_schedule() argument
2820 if (size <= sk->sk_forward_alloc) in sk_forced_mem_schedule()
2823 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; in sk_forced_mem_schedule()
2824 sk_memory_allocated_add(sk, amt, &status); in sk_forced_mem_schedule()
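sk_forced_mem_schedule() above lets tcp_send_fin() charge socket memory even under pressure: if the request is not already covered by sk_forward_alloc, it is rounded up to whole SK_MEM_QUANTUM units before being added. A sketch of that rounding, assuming SK_MEM_QUANTUM is one 4096-byte page (true on common configs, but an assumption here).

    #include <stdio.h>

    #define SK_MEM_QUANTUM 4096  /* one page on common configs */

    /* Rounding as in sk_forced_mem_schedule(): charge whole quanta,
     * so forward_alloc grows in page-sized steps. */
    static int forced_mem_schedule(int forward_alloc, int size)
    {
        if (size <= forward_alloc)
            return forward_alloc;       /* already covered */

        int amt = (size + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM;
        return forward_alloc + amt * SK_MEM_QUANTUM;
    }

    int main(void)
    {
        printf("%d\n", forced_mem_schedule(0, 700));   /* 4096 */
        printf("%d\n", forced_mem_schedule(0, 5000));  /* 8192 */
        return 0;
    }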
2830 void tcp_send_fin(struct sock *sk) in tcp_send_fin() argument
2832 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); in tcp_send_fin()
2833 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin()
2840 if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) { in tcp_send_fin()
2845 if (!tcp_send_head(sk)) { in tcp_send_fin()
2856 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
2863 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
2867 tcp_queue_skb(sk, skb); in tcp_send_fin()
2869 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); in tcp_send_fin()
2877 void tcp_send_active_reset(struct sock *sk, gfp_t priority) in tcp_send_active_reset() argument
2884 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
2890 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
2894 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
2895 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); in tcp_send_active_reset()
2897 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); in tcp_send_active_reset()
2906 int tcp_send_synack(struct sock *sk) in tcp_send_synack() argument
2910 skb = tcp_write_queue_head(sk); in tcp_send_synack()
2920 tcp_unlink_write_queue(skb, sk); in tcp_send_synack()
2922 __tcp_add_write_queue_head(sk, nskb); in tcp_send_synack()
2923 sk_wmem_free_skb(sk, skb); in tcp_send_synack()
2924 sk->sk_wmem_queued += nskb->truesize; in tcp_send_synack()
2925 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
2930 tcp_ecn_send_synack(sk, skb); in tcp_send_synack()
2932 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
2944 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, in tcp_make_synack() argument
2950 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack()
2974 skb_set_owner_w(skb, (struct sock *)sk); in tcp_make_synack()
2993 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
3023 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); in tcp_make_synack()
3039 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) in tcp_ca_dst_init() argument
3041 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_dst_init()
3059 static void tcp_connect_init(struct sock *sk) in tcp_connect_init() argument
3061 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_connect_init()
3062 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init()
3072 if (tp->af_specific->md5_lookup(sk, sk)) in tcp_connect_init()
3080 tcp_mtup_init(sk); in tcp_connect_init()
3081 tcp_sync_mss(sk, dst_mtu(dst)); in tcp_connect_init()
3083 tcp_ca_dst_init(sk, dst); in tcp_connect_init()
3091 tcp_initialize_rcv_mss(sk); in tcp_connect_init()
3094 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3095 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3096 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3098 tcp_select_initial_window(tcp_full_space(sk), in tcp_connect_init()
3109 sk->sk_err = 0; in tcp_connect_init()
3110 sock_reset_flag(sk, SOCK_DONE); in tcp_connect_init()
3125 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; in tcp_connect_init()
3126 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3130 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
3132 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb()
3137 __tcp_add_write_queue_tail(sk, skb); in tcp_connect_queue_skb()
3138 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
3139 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3151 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) in tcp_send_syn_data() argument
3153 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data()
3160 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, in tcp_send_syn_data()
3180 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - in tcp_send_syn_data()
3188 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); in tcp_send_syn_data()
3210 tcp_connect_queue_skb(sk, syn_data); in tcp_send_syn_data()
3212 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3225 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); in tcp_send_syn_data()
3233 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3242 int tcp_connect(struct sock *sk) in tcp_connect() argument
3244 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect()
3248 tcp_connect_init(sk); in tcp_connect()
3251 tcp_finish_connect(sk, NULL); in tcp_connect()
3255 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); in tcp_connect()
3261 tcp_connect_queue_skb(sk, buff); in tcp_connect()
3262 tcp_ecn_send_syn(sk, buff); in tcp_connect()
3265 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3266 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3275 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); in tcp_connect()
3278 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_connect()
3279 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
3288 void tcp_send_delayed_ack(struct sock *sk) in tcp_send_delayed_ack() argument
3290 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_delayed_ack()
3294 tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); in tcp_send_delayed_ack()
3297 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack()
3331 tcp_send_ack(sk); in tcp_send_delayed_ack()
3340 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
3344 void tcp_send_ack(struct sock *sk) in tcp_send_ack() argument
3349 if (sk->sk_state == TCP_CLOSE) in tcp_send_ack()
3352 tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); in tcp_send_ack()
3358 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
3360 inet_csk_schedule_ack(sk); in tcp_send_ack()
3361 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in tcp_send_ack()
3362 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_send_ack()
3369 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); in tcp_send_ack()
3381 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
3396 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) in tcp_xmit_probe_skb() argument
3398 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb()
3402 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_xmit_probe_skb()
3414 NET_INC_STATS(sock_net(sk), mib); in tcp_xmit_probe_skb()
3415 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); in tcp_xmit_probe_skb()
3418 void tcp_send_window_probe(struct sock *sk) in tcp_send_window_probe() argument
3420 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
3421 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
3422 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); in tcp_send_window_probe()
3427 int tcp_write_wakeup(struct sock *sk, int mib) in tcp_write_wakeup() argument
3429 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup()
3432 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
3435 skb = tcp_send_head(sk); in tcp_write_wakeup()
3438 unsigned int mss = tcp_current_mss(sk); in tcp_write_wakeup()
3452 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) in tcp_write_wakeup()
3458 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3460 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()
3464 tcp_xmit_probe_skb(sk, 1, mib); in tcp_write_wakeup()
3465 return tcp_xmit_probe_skb(sk, 0, mib); in tcp_write_wakeup()
3472 void tcp_send_probe0(struct sock *sk) in tcp_send_probe0() argument
3474 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_send_probe0()
3475 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0()
3479 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); in tcp_send_probe0()
3481 if (tp->packets_out || !tcp_send_head(sk)) { in tcp_send_probe0()
3504 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_send_probe0()
3505 tcp_probe0_when(sk, probe_max), in tcp_send_probe0()
3509 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) in tcp_rtx_synack() argument
3516 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true); in tcp_rtx_synack()
3518 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); in tcp_rtx_synack()
3519 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); in tcp_rtx_synack()