/linux-4.1.27/net/ipv4/
  tcp_input.c
    163  len -= tcp_sk(sk)->tcp_header_len;  in tcp_measure_rcv_mss()
    179  unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);  in tcp_incr_quickack()
    285  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_sndbuf_expand()
    340  struct tcp_sock *tp = tcp_sk(sk);  in __tcp_grow_window()
    357  struct tcp_sock *tp = tcp_sk(sk);  in tcp_grow_window()
    385  u32 mss = tcp_sk(sk)->advmss;  in tcp_fixup_rcvbuf()
    406  struct tcp_sock *tp = tcp_sk(sk);  in tcp_init_buffer_space()
    442  struct tcp_sock *tp = tcp_sk(sk);  in tcp_clamp_window()
    467  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_initialize_rcv_mss()
    541  struct tcp_sock *tp = tcp_sk(sk);  in tcp_rcv_rtt_measure_ts()
    [all …]
  tcp_timer.c
    57  struct tcp_sock *tp = tcp_sk(sk);  in tcp_out_of_resources()
    114  struct tcp_sock *tp = tcp_sk(sk);  in tcp_mtu_probing()
    142  start_ts = tcp_sk(sk)->retrans_stamp;  in retransmits_timed_out()
    162  struct tcp_sock *tp = tcp_sk(sk);  in tcp_write_timeout()
    221  struct tcp_sock *tp = tcp_sk(sk);  in tcp_delack_timer_handler()
    277  if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))  in tcp_delack_timer()
    287  struct tcp_sock *tp = tcp_sk(sk);  in tcp_probe_timer()
    341  req = tcp_sk(sk)->fastopen_rsk;  in tcp_fastopen_synack_timer()
    365  struct tcp_sock *tp = tcp_sk(sk);  in tcp_retransmit_timer()
    547  if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))  in tcp_write_timer()
    [all …]
  tcp_westwood.c
    72  w->snd_una = tcp_sk(sk)->snd_una;  in tcp_westwood_init()
    125  w->snd_una = tcp_sk(sk)->snd_una;  in westwood_update_window()
    163  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_fast_bw()
    180  const struct tcp_sock *tp = tcp_sk(sk);  in westwood_acked_count()
    217  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_bw_rttmin()
    240  struct tcp_sock *tp = tcp_sk(sk);  in tcp_westwood_event()
  tcp_output.c
    75  struct tcp_sock *tp = tcp_sk(sk);  in tcp_event_new_data_sent()
    99  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_acceptable_seq()
    123  struct tcp_sock *tp = tcp_sk(sk);  in tcp_advertise_mss()
    143  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cwnd_restart()
    271  struct tcp_sock *tp = tcp_sk(sk);  in tcp_select_window()
    320  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_synack()
    332  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send_syn()
    370  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ecn_send()
    551  struct tcp_sock *tp = tcp_sk(sk);  in tcp_syn_options()
    680  struct tcp_sock *tp = tcp_sk(sk);  in tcp_established_options()
    [all …]
  tcp_cubic.c
    131  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_hystart_reset()
    151  tcp_sk(sk)->snd_ssthresh = initial_ssthresh;  in bictcp_init()
    161  delta = now - tcp_sk(sk)->lsndtime;  in bictcp_cwnd_event()
    338  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_cong_avoid()
    357  const struct tcp_sock *tp = tcp_sk(sk);  in bictcp_recalc_ssthresh()
    378  return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);  in bictcp_undo_cwnd()
    391  struct tcp_sock *tp = tcp_sk(sk);  in hystart_update()
    442  const struct tcp_sock *tp = tcp_sk(sk);  in bictcp_acked()
  tcp_highspeed.c
    101  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_init()
    113  struct tcp_sock *tp = tcp_sk(sk);  in hstcp_cong_avoid()
    152  const struct tcp_sock *tp = tcp_sk(sk);  in hstcp_ssthresh()
  tcp_dctcp.c
    86  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_init()
    115  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ssthresh()
    129  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ce_state_0_to_1()
    159  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ce_state_1_to_0()
    188  const struct tcp_sock *tp = tcp_sk(sk);  in dctcp_update_alpha()
  tcp_bic.c
    78  tcp_sk(sk)->snd_ssthresh = initial_ssthresh;  in bictcp_init()
    143  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_cong_avoid()
    163  const struct tcp_sock *tp = tcp_sk(sk);  in bictcp_recalc_ssthresh()
    185  const struct tcp_sock *tp = tcp_sk(sk);  in bictcp_undo_cwnd()
  tcp_scalable.c
    20  struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_cong_avoid()
    34  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_scalable_ssthresh()
  tcp_hybla.c
    38  tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),  in hybla_recalc_param()
    47  struct tcp_sock *tp = tcp_sk(sk);  in hybla_init()
    91  struct tcp_sock *tp = tcp_sk(sk);  in hybla_cong_avoid()
  tcp_illinois.c
    58  struct tcp_sock *tp = tcp_sk(sk);  in rtt_reset()
    222  struct tcp_sock *tp = tcp_sk(sk);  in update_params()
    260  struct tcp_sock *tp = tcp_sk(sk);  in tcp_illinois_cong_avoid()
    295  struct tcp_sock *tp = tcp_sk(sk);  in tcp_illinois_ssthresh()
  tcp_yeah.c
    44  struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_init()
    72  struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_cong_avoid()
    206  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_ssthresh()
  tcp_ipv4.c
    111  struct tcp_sock *tp = tcp_sk(sk);  in tcp_twsk_unique()
    145  struct tcp_sock *tp = tcp_sk(sk);  in tcp_v4_connect()
    276  u32 mtu = tcp_sk(sk)->mtu_info;  in tcp_v4_mtu_reduced()
    408  tp = tcp_sk(sk);  in tcp_v4_err()
    687  ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),  in tcp_v4_send_reset()
    772  ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),  in tcp_v4_send_ack()
    806  tcp_sk(sk)->snd_nxt;  in tcp_v4_reqsk_send_ack()
    875  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_md5_do_lookup()
    916  struct tcp_sock *tp = tcp_sk(sk);  in tcp_md5_do_add()
    975  struct tcp_sock *tp = tcp_sk(sk);  in tcp_clear_md5_list()
    [all …]
  tcp_htcp.c
    69  const struct tcp_sock *tp = tcp_sk(sk);  in htcp_cwnd_undo()
    105  const struct tcp_sock *tp = tcp_sk(sk);  in measure_achieved_throughput()
    224  const struct tcp_sock *tp = tcp_sk(sk);  in htcp_recalc_ssthresh()
    233  struct tcp_sock *tp = tcp_sk(sk);  in htcp_cong_avoid()
  tcp.c
    383  struct tcp_sock *tp = tcp_sk(sk);  in tcp_init_sock()
    453  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_poll()
    550  struct tcp_sock *tp = tcp_sk(sk);  in tcp_ioctl()
    618  struct tcp_sock *tp = tcp_sk(sk);  in skb_entail()
    661  struct tcp_sock *tp = tcp_sk(sk);  in tcp_push()
    842  struct tcp_sock *tp = tcp_sk(sk);  in tcp_xmit_size_goal()
    877  struct tcp_sock *tp = tcp_sk(sk);  in do_tcp_sendpages()
    1013  const struct tcp_sock *tp = tcp_sk(sk);  in select_size()
    1045  struct tcp_sock *tp = tcp_sk(sk);  in tcp_sendmsg_fastopen()
    1070  struct tcp_sock *tp = tcp_sk(sk);  in tcp_sendmsg()
    [all …]
  tcp_lp.c
    135  struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_remote_hz_estimator()
    188  struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_owd_calculator()
    265  struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_pkts_acked()
  tcp_veno.c
    119  struct tcp_sock *tp = tcp_sk(sk);  in tcp_veno_cong_avoid()
    192  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_veno_ssthresh()
  tcp_vegas.c
    72  const struct tcp_sock *tp = tcp_sk(sk);  in vegas_enable()
    166  struct tcp_sock *tp = tcp_sk(sk);  in tcp_vegas_cong_avoid()
  tcp_diag.c
    22  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_diag_get_info()
  tcp_cong.c
    410  struct tcp_sock *tp = tcp_sk(sk);  in tcp_reno_cong_avoid()
    429  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_reno_ssthresh()
  tcp_minisocks.c
    272  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_time_wait()
    370  struct tcp_sock *tp = tcp_sk(sk);  in tcp_openreq_init_rwin()
    446  struct tcp_sock *newtp = tcp_sk(newsk);  in tcp_create_openreq_child()
  tcp_metrics.c
    387  struct tcp_sock *tp = tcp_sk(sk);  in tcp_update_metrics()
    507  struct tcp_sock *tp = tcp_sk(sk);  in tcp_init_metrics()
    633  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fetch_timewait_stamp()
    660  struct tcp_sock *tp = tcp_sk(sk);  in tcp_remember_stamp()
  tcp_probe.c
    110  const struct tcp_sock *tp = tcp_sk(sk);  in jtcp_rcv_established()
  tcp_fastopen.c
    153  tp = tcp_sk(child);  in tcp_fastopen_create_child()
  syncookies.c
    297  struct tcp_sock *tp = tcp_sk(sk);  in cookie_v4_check()
  inet_connection_sock.c
    861  BUG_ON(tcp_sk(child)->fastopen_rsk != req);  in inet_csk_listen_stop()
    870  tcp_sk(child)->fastopen_rsk = NULL;  in inet_csk_listen_stop()
  af_inet.c
    613  tcp_sk(sk)->fastopen_req &&  in __inet_stream_connect()
    614  tcp_sk(sk)->fastopen_req->data ? 1 : 0;  in __inet_stream_connect()
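Most of the files in the net/ipv4/ listing above are congestion-control modules (tcp_cubic.c, tcp_bic.c, tcp_vegas.c, tcp_westwood.c and friends, with the Reno baseline in tcp_cong.c), and they all reach the per-connection TCP state the same way: each tcp_congestion_ops callback receives a struct sock * and immediately converts it with tcp_sk() to touch fields such as snd_cwnd, snd_ssthresh and snd_cwnd_cnt. The sketch below is a hypothetical cong_avoid callback written only to illustrate that access pattern; it is not the code of any module listed here and omits the clamping and helper calls the real modules use.

    #include <net/tcp.h>

    /* Hypothetical congestion-avoidance callback: shows the tcp_sk() access
     * pattern used by the modules above, not any real algorithm.
     */
    static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);       /* per-connection TCP state */

            if (tp->snd_cwnd <= tp->snd_ssthresh) {
                    /* slow start: one extra segment per newly ACKed segment */
                    tp->snd_cwnd += acked;
            } else if (++tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                    /* congestion avoidance: roughly one extra segment per RTT */
                    tp->snd_cwnd_cnt = 0;
                    tp->snd_cwnd++;
            }
    }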
/linux-4.1.27/include/net/
  tcp.h
    332  tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;  in tcp_synq_overflow()
    338  unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;  in tcp_synq_no_recent_overflow()
    619  struct tcp_sock *tp = tcp_sk(sk);  in tcp_fast_path_check()
    981  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_current_ssthresh()
    1037  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_is_cwnd_limited()
    1048  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_check_probe_timer()
    1173  int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;  in tcp_fin_time()
    1380  tcp_clear_all_retrans_hints(tcp_sk(sk));  in tcp_write_queue_purge()
    1457  if (tcp_sk(sk)->highest_sack == NULL)  in tcp_add_write_queue_tail()
    1458  tcp_sk(sk)->highest_sack = skb;  in tcp_add_write_queue_tail()
    [all …]
/linux-4.1.27/net/ipv6/
  tcp_ipv6.c
    121  struct tcp_sock *tp = tcp_sk(sk);  in tcp_v6_connect()
    313  dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);  in tcp_v6_mtu_reduced()
    367  tp = tcp_sk(sk);  in tcp_v6_err()
    743  struct sock *ctl_sk = net->ipv6.tcp_sk;  in tcp_v6_send_response()
    933  tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,  in tcp_v6_reqsk_send_ack()
    1025  newtp = tcp_sk(newsk);  in tcp_v6_syn_recv_sock()
    1090  newtp = tcp_sk(newsk);  in tcp_v6_syn_recv_sock()
    1151  if (tcp_sk(sk)->rx_opt.user_mss &&  in tcp_v6_syn_recv_sock()
    1152  tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)  in tcp_v6_syn_recv_sock()
    1153  newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;  in tcp_v6_syn_recv_sock()
    [all …]
  syncookies.c
    162  struct tcp_sock *tp = tcp_sk(sk);  in cookie_v6_check()
/linux-4.1.27/include/linux/
  tcp.h
    344  static inline struct tcp_sock *tcp_sk(const struct sock *sk)  in tcp_sk() (function definition)
    374  tcp_sk(sk)->fastopen_rsk != NULL);  in tcp_passive_fastopen()
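The tcp.h entry just above (include/linux/tcp.h:344) is the definition site of the accessor that every other hit in this listing calls. In this era of the kernel it is simply a type cast, which works because the protocol-specific struct tcp_sock begins with the common struct sock. The snippet below is a simplified sketch of that accessor plus a typical call site, not a verbatim copy of the header; the helper is renamed my_tcp_sk() here to avoid clashing with the real definition.

    #include <net/tcp.h>    /* struct sock, struct tcp_sock */

    /* Sketch of the accessor defined at include/linux/tcp.h:344: the cast is
     * valid because struct sock is the first member of the tcp_sock layout.
     */
    static inline struct tcp_sock *my_tcp_sk(const struct sock *sk)
    {
            return (struct tcp_sock *)sk;
    }

    /* Typical call site, mirroring the pattern seen throughout the listing. */
    static bool example_all_data_acked(struct sock *sk)
    {
            struct tcp_sock *tp = my_tcp_sk(sk);

            return tp->snd_una == tp->snd_nxt;      /* everything sent has been ACKed */
    }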
/linux-4.1.27/include/net/netns/
  ipv6.h
    70  struct sock *tcp_sk;  (member)
  ipv4.h
    57  struct sock * __percpu *tcp_sk;  (member)
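Note that these two netns hits are not calls to the tcp_sk() accessor at all: they are structure members that merely share the name. Each is a per-network-namespace TCP control socket used for sending stack-generated segments such as resets and ACKs; the IPv4 one is a per-CPU array, which is why tcp_v4_send_reset() and tcp_v4_send_ack() in the listing above dereference it through this_cpu_ptr(), while tcp_v6_send_response() reads the single IPv6 socket directly. A hypothetical illustration of how those call sites reach the members (not code from the files above, and assuming an IPv6-enabled build):

    #include <net/net_namespace.h>

    /* Hypothetical helper showing how the listed call sites obtain the
     * per-namespace control sockets named tcp_sk.
     */
    static void example_ctl_sockets(struct net *net)
    {
            struct sock *v4_ctl = *this_cpu_ptr(net->ipv4.tcp_sk); /* cf. tcp_v4_send_reset() */
            struct sock *v6_ctl = net->ipv6.tcp_sk;                /* cf. tcp_v6_send_response() */

            (void)v4_ctl;
            (void)v6_ctl;
    }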
/linux-4.1.27/net/rds/
  tcp.c
    86  return tcp_sk(tc->t_sock->sk)->snd_nxt;  in rds_tcp_snd_nxt()
    91  return tcp_sk(tc->t_sock->sk)->snd_una;  in rds_tcp_snd_una()
/linux-4.1.27/net/core/
  request_sock.c
    179  tcp_sk(sk)->fastopen_rsk = NULL;  in reqsk_fastopen_remove()
  sock.c
    868  sk->sk_tskey = tcp_sk(sk)->snd_una;  in sock_setsockopt()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/
  socklnd_lib-linux.c
    569  tp = tcp_sk(sk);  in ksocknal_lib_push_conn()
/linux-4.1.27/drivers/block/drbd/
  drbd_debugfs.c
    393  struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);  in in_flight_summary_show()
/linux-4.1.27/net/sunrpc/
  svcsock.c
    1345  tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;  in svc_tcp_init()
  xprtsock.c
    2100  tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;  in xs_tcp_finish_connecting()
|