Identifier cross-reference for "icsk" (struct inet_connection_sock) in linux-4.1.27.
Each entry lists the matching source line number, the matching line, and the enclosing
function; "(local)" and "(argument)" mark declaration sites, and "[all …]" marks lists
truncated by the search tool.

/linux-4.1.27/net/dccp/
timer.c
    37  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_write_timeout() (local)
    41  if (icsk->icsk_retransmits != 0)    in dccp_write_timeout()
    43  retry_until = icsk->icsk_syn_retries ?    in dccp_write_timeout()
    46  if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {    in dccp_write_timeout()
    76  if (icsk->icsk_retransmits >= retry_until) {    in dccp_write_timeout()
    89  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_retransmit_timer() (local)
   102  if (icsk->icsk_retransmits == 0)    in dccp_retransmit_timer()
   110  if (--icsk->icsk_retransmits == 0)    in dccp_retransmit_timer()
   111  icsk->icsk_retransmits = 1;    in dccp_retransmit_timer()
   113  min(icsk->icsk_rto,    in dccp_retransmit_timer()
  [all …]
output.c
    49  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_transmit_skb() (local)
    76  if (icsk->icsk_retransmits == 0)    in dccp_transmit_skb()
   134  icsk->icsk_af_ops->send_check(sk, skb);    in dccp_transmit_skb()
   141  err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);    in dccp_transmit_skb()
   164  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_sync_mss() (local)
   170  cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +    in dccp_sync_mss()
   190  icsk->icsk_pmtu_cookie = pmtu;    in dccp_sync_mss()
   535  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_connect() (local)
   562  icsk->icsk_retransmits = 0;    in dccp_connect()
   564  icsk->icsk_rto, DCCP_RTO_MAX);    in dccp_connect()
  [all …]
diag.c
    22  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_get_info() (local)
    27  info->tcpi_retransmits = icsk->icsk_retransmits;    in dccp_get_info()
    28  info->tcpi_probes = icsk->icsk_probes_out;    in dccp_get_info()
    29  info->tcpi_backoff = icsk->icsk_backoff;    in dccp_get_info()
    30  info->tcpi_pmtu = icsk->icsk_pmtu_cookie;    in dccp_get_info()
minisocks.c
    42  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_time_wait() (local)
    43  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);    in dccp_time_wait()
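Both dccp_time_wait() above and tcp_time_wait() (tcp_minisocks.c, further down) derive the
TIME-WAIT timeout as (icsk_rto << 2) - (icsk_rto >> 1), i.e. 4*RTO minus RTO/2 = 3.5*RTO.
The stand-alone user-space sketch below only checks that arithmetic; the sample RTO value
is invented, not taken from the kernel.

/* Stand-alone sketch (user space): verify that (rto << 2) - (rto >> 1)
 * is the 3.5 * RTO that dccp_time_wait() and tcp_time_wait() compute.
 * The sample RTO of 200 (arbitrary units) is made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	const int rto = 200;                               /* hypothetical RTO   */
	const int tw_timeout = (rto << 2) - (rto >> 1);    /* 4*rto - rto/2      */

	printf("rto=%d  time-wait timeout=%d (%.1f * rto)\n",
	       rto, tw_timeout, (double)tw_timeout / rto);
	return 0;
}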
input.c
   405  const struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_rcv_request_sent_state_process() (local)
   447  dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);    in dccp_rcv_request_sent_state_process()
   476  icsk->icsk_af_ops->rebuild_header(sk);    in dccp_rcv_request_sent_state_process()
   483  if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||    in dccp_rcv_request_sent_state_process()
   484  icsk->icsk_accept_queue.rskq_defer_accept) {    in dccp_rcv_request_sent_state_process()
proto.c
   176  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_init_sock() (local)
   178  icsk->icsk_rto = DCCP_TIMEOUT_INIT;    in dccp_init_sock()
   179  icsk->icsk_syn_retries = sysctl_dccp_request_retries;    in dccp_init_sock()
   182  icsk->icsk_sync_mss = dccp_sync_mss;    in dccp_init_sock()
   253  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_disconnect() (local)
   290  icsk->icsk_backoff = 0;    in dccp_disconnect()
   294  WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);    in dccp_disconnect()
ipv6.c
   799  struct inet_connection_sock *icsk = inet_csk(sk);    in dccp_v6_connect() (local)
   867  u32 exthdrlen = icsk->icsk_ext_hdr_len;    in dccp_v6_connect()
   879  icsk->icsk_af_ops = &dccp_ipv6_mapped;    in dccp_v6_connect()
   884  icsk->icsk_ext_hdr_len = exthdrlen;    in dccp_v6_connect()
   885  icsk->icsk_af_ops = &dccp_ipv6_af_ops;    in dccp_v6_connect()
   924  icsk->icsk_ext_hdr_len = 0;    in dccp_v6_connect()
   926  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;    in dccp_v6_connect()
/linux-4.1.27/net/ipv4/
tcp_timer.c
   102  static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)    in tcp_mtu_probing() (argument)
   108  if (!icsk->icsk_mtup.enabled) {    in tcp_mtu_probing()
   109  icsk->icsk_mtup.enabled = 1;    in tcp_mtu_probing()
   110  icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;    in tcp_mtu_probing()
   111  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);    in tcp_mtu_probing()
   117  mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;    in tcp_mtu_probing()
   120  icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);    in tcp_mtu_probing()
   121  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);    in tcp_mtu_probing()
   161  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_write_timeout() (local)
   167  if (icsk->icsk_retransmits) {    in tcp_write_timeout()
  [all …]
tcp_cong.c
   152  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_assign_congestion_control() (local)
   158  icsk->icsk_ca_ops = ca;    in tcp_assign_congestion_control()
   172  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));    in tcp_assign_congestion_control()
   177  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_init_congestion_control() (local)
   179  if (icsk->icsk_ca_ops->init)    in tcp_init_congestion_control()
   180  icsk->icsk_ca_ops->init(sk);    in tcp_init_congestion_control()
   186  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_reinit_congestion_control() (local)
   189  icsk->icsk_ca_ops = ca;    in tcp_reinit_congestion_control()
   190  icsk->icsk_ca_setsockopt = 1;    in tcp_reinit_congestion_control()
   192  if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)    in tcp_reinit_congestion_control()
  [all …]
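The tcp_cong.c matches show how a congestion-control module is attached: icsk_ca_ops is
pointed at its struct tcp_congestion_ops, the icsk_ca_priv scratch area is zeroed, and the
module's init hook is invoked through icsk_ca_ops->init. Below is a minimal, hedged sketch
of a module that plugs into this path on a 4.1-era kernel (where cong_avoid takes
(sk, ack, acked)); the cc_sketch names and its state layout are hypothetical, and it simply
defers to the exported Reno helpers.

#include <linux/module.h>
#include <net/tcp.h>

/* Hypothetical per-socket state; it must fit in icsk->icsk_ca_priv. */
struct cc_sketch_state {
	u32 acked_total;
};

static void cc_sketch_init(struct sock *sk)
{
	struct cc_sketch_state *st = inet_csk_ca(sk);

	/* tcp_assign_congestion_control() has already zeroed icsk_ca_priv,
	 * so this assignment is only for documentation. */
	st->acked_total = 0;
}

static void cc_sketch_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct cc_sketch_state *st = inet_csk_ca(sk);

	st->acked_total += acked;
	tcp_reno_cong_avoid(sk, ack, acked);	/* defer to Reno's window growth */
}

static struct tcp_congestion_ops cc_sketch __read_mostly = {
	.init		= cc_sketch_init,
	.ssthresh	= tcp_reno_ssthresh,	/* exported Reno helper */
	.cong_avoid	= cc_sketch_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "cc_sketch",
};

static int __init cc_sketch_register(void)
{
	BUILD_BUG_ON(sizeof(struct cc_sketch_state) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&cc_sketch);
}

static void __exit cc_sketch_unregister(void)
{
	tcp_unregister_congestion_control(&cc_sketch);
}

module_init(cc_sketch_register);
module_exit(cc_sketch_unregister);
MODULE_LICENSE("GPL");

The private state lives in icsk_ca_priv and is reached through inet_csk_ca(sk), which is
why tcp_assign_congestion_control() can simply memset that area before the init hook runs.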
inet_connection_sock.c
   248  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_wait_for_connect() (local)
   270  if (reqsk_queue_empty(&icsk->icsk_accept_queue))    in inet_csk_wait_for_connect()
   275  if (!reqsk_queue_empty(&icsk->icsk_accept_queue))    in inet_csk_wait_for_connect()
   296  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_accept() (local)
   297  struct request_sock_queue *queue = &icsk->icsk_accept_queue;    in inet_csk_accept()
   367  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_init_xmit_timers() (local)
   369  setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,    in inet_csk_init_xmit_timers()
   371  setup_timer(&icsk->icsk_delack_timer, delack_handler,    in inet_csk_init_xmit_timers()
   374  icsk->icsk_pending = icsk->icsk_ack.pending = 0;    in inet_csk_init_xmit_timers()
   380  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_clear_xmit_timers() (local)
  [all …]
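inet_csk_init_xmit_timers() is the hook through which a connection-oriented protocol
registers its three timer callbacks (retransmit, delayed ACK, keepalive); the matches above
show it wiring icsk_retransmit_timer and icsk_delack_timer with setup_timer() and clearing
icsk_pending / icsk_ack.pending. A hedged sketch of a caller follows; the my_proto_*
handlers are hypothetical, and in this 4.1 tree timer callbacks still receive an
unsigned long that carries the struct sock pointer.

#include <net/inet_connection_sock.h>
#include <net/sock.h>

/* Hypothetical timer callbacks.  Real ones (e.g. tcp_write_timer) also defer
 * their work if the socket is owned by user context; the final sock_put()
 * drops the reference sk_reset_timer() took when the timer was armed.
 */
static void my_proto_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* retransmission work would run here */
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void my_proto_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* delayed-ACK work would run here */
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void my_proto_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* keepalive probing would run here */
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void my_proto_init_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, my_proto_write_timer,
				  my_proto_delack_timer,
				  my_proto_keepalive_timer);
}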
tcp_output.c
    74  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_event_new_data_sent() (local)
    82  if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||    in tcp_event_new_data_sent()
    83  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {    in tcp_event_new_data_sent()
   164  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_event_data_sent() (local)
   169  (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))    in tcp_event_data_sent()
   177  if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&    in tcp_event_data_sent()
   179  icsk->icsk_ack.pingpong = 1;    in tcp_event_data_sent()
   909  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_transmit_skb() (local)
  1009  icsk->icsk_af_ops->send_check(sk, skb);    in tcp_transmit_skb()
  1031  err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);    in tcp_transmit_skb()
  [all …]
tcp_input.c
   132  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_measure_rcv_mss() (local)
   133  const unsigned int lss = icsk->icsk_ack.last_seg_size;    in tcp_measure_rcv_mss()
   136  icsk->icsk_ack.last_seg_size = 0;    in tcp_measure_rcv_mss()
   142  if (len >= icsk->icsk_ack.rcv_mss) {    in tcp_measure_rcv_mss()
   143  icsk->icsk_ack.rcv_mss = len;    in tcp_measure_rcv_mss()
   164  icsk->icsk_ack.last_seg_size = len;    in tcp_measure_rcv_mss()
   166  icsk->icsk_ack.rcv_mss = len;    in tcp_measure_rcv_mss()
   170  if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)    in tcp_measure_rcv_mss()
   171  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;    in tcp_measure_rcv_mss()
   172  icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;    in tcp_measure_rcv_mss()
  [all …]
tcp.c
   382  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_init_sock() (local)
   390  icsk->icsk_rto = TCP_TIMEOUT_INIT;    in tcp_init_sock()
   419  icsk->icsk_sync_mss = tcp_sync_mss;    in tcp_init_sock()
  1369  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_cleanup_rbuf() (local)
  1372  if (icsk->icsk_ack.blocked ||    in tcp_cleanup_rbuf()
  1374  tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||    in tcp_cleanup_rbuf()
  1382  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||    in tcp_cleanup_rbuf()
  1383  ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&    in tcp_cleanup_rbuf()
  1384  !icsk->icsk_ack.pingpong)) &&    in tcp_cleanup_rbuf()
  2171  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_disconnect() (local)
  [all …]
tcp_ipv4.c
   361  struct inet_connection_sock *icsk;    in tcp_v4_err() (local)
   407  icsk = inet_csk(sk);    in tcp_v4_err()
   455  if (seq != tp->snd_una || !icsk->icsk_retransmits ||    in tcp_v4_err()
   456  !icsk->icsk_backoff || fastopen)    in tcp_v4_err()
   462  icsk->icsk_backoff--;    in tcp_v4_err()
   463  icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :    in tcp_v4_err()
   465  icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);    in tcp_v4_err()
   470  remaining = icsk->icsk_rto -    in tcp_v4_err()
   471  min(icsk->icsk_rto,    in tcp_v4_err()
  1761  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v4_init_sock() (local)
  [all …]
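In the tcp_v4_err() matches, the RTO is rebuilt from srtt and then re-expanded with
inet_csk_rto_backoff(), which in this tree shifts icsk_rto left by icsk_backoff and clamps
the result to the supplied maximum. The user-space sketch below reproduces only that
arithmetic; the sample values are invented.

/* Stand-alone sketch (user space) of the clamp applied by
 * inet_csk_rto_backoff(icsk, max_when): min(icsk_rto << icsk_backoff, max_when).
 * In the kernel these values are jiffies; the numbers here are made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t rto_backoff(uint64_t rto, unsigned int backoff, uint64_t max_when)
{
	uint64_t when = rto << backoff;

	return when < max_when ? when : max_when;
}

int main(void)
{
	const uint64_t rto = 250, rto_max = 120000;   /* hypothetical: 250 "ms", 120 s cap */
	unsigned int backoff;

	for (backoff = 0; backoff <= 10; backoff++)
		printf("backoff=%2u  timeout=%llu\n", backoff,
		       (unsigned long long)rto_backoff(rto, backoff, rto_max));
	return 0;
}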
inet_diag.c
   107  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,    in inet_sk_diag_fill() (argument)
   176  if (!icsk) {    in inet_sk_diag_fill()
   183  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||    in inet_sk_diag_fill()
   184  icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||    in inet_sk_diag_fill()
   185  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {    in inet_sk_diag_fill()
   187  r->idiag_retrans = icsk->icsk_retransmits;    in inet_sk_diag_fill()
   188  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);    in inet_sk_diag_fill()
   189  } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {    in inet_sk_diag_fill()
   191  r->idiag_retrans = icsk->icsk_probes_out;    in inet_sk_diag_fill()
   192  r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);    in inet_sk_diag_fill()
  [all …]
tcp_htcp.c
    84  const struct inet_connection_sock *icsk = inet_csk(sk);    in measure_rtt() (local)
    92  if (icsk->icsk_ca_state == TCP_CA_Open) {    in measure_rtt()
   104  const struct inet_connection_sock *icsk = inet_csk(sk);    in measure_achieved_throughput() (local)
   109  if (icsk->icsk_ca_state == TCP_CA_Open)    in measure_achieved_throughput()
   119  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {    in measure_achieved_throughput()
tcp_minisocks.c
   271  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_time_wait() (local)
   283  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);    in tcp_time_wait()
   405  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_ca_openreq_child() (local)
   415  icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);    in tcp_ca_openreq_child()
   416  icsk->icsk_ca_ops = ca;    in tcp_ca_openreq_child()
   424  (!icsk->icsk_ca_setsockopt ||    in tcp_ca_openreq_child()
   425  !try_module_get(icsk->icsk_ca_ops->owner)))    in tcp_ca_openreq_child()
inet_timewait_sock.c
   135  const struct inet_connection_sock *icsk = inet_csk(sk);    in __inet_twsk_hashdance() (local)
   146  tw->tw_tb = icsk->icsk_bind_hash;    in __inet_twsk_hashdance()
   147  WARN_ON(!icsk->icsk_bind_hash);    in __inet_twsk_hashdance()
tcp_bic.c
   202  const struct inet_connection_sock *icsk = inet_csk(sk);    in bictcp_acked() (local)
   204  if (icsk->icsk_ca_state == TCP_CA_Open) {    in bictcp_acked()
tcp_yeah.c
    61  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_yeah_pkts_acked() (local)
    64  if (icsk->icsk_ca_state == TCP_CA_Open)    in tcp_yeah_pkts_acked()
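tcp_htcp.c, tcp_bic.c and tcp_yeah.c all gate their RTT and throughput sampling on
icsk_ca_state == TCP_CA_Open. The hedged fragment below shows that guard in a pkts_acked
callback with the 4.1 signature (sk, num_acked, rtt_us); it is written as if it belonged to
a hypothetical module like the cc_sketch example earlier.

#include <net/tcp.h>

static void cc_sketch_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	/* Like the modules above: only trust samples taken in the Open state
	 * (no loss or recovery in progress) and ignore invalid RTTs. */
	if (icsk->icsk_ca_state != TCP_CA_Open || rtt_us <= 0)
		return;

	/* fold rtt_us into the module's private state (inet_csk_ca(sk)) here */
}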
syncookies.c
   226  struct inet_connection_sock *icsk = inet_csk(sk);    in get_cookie_sock() (local)
   229  child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);    in get_cookie_sock()
ip_sockglue.c
   638  struct inet_connection_sock *icsk = inet_csk(sk);    in do_ip_setsockopt() (local)
   646  icsk->icsk_ext_hdr_len -= old->opt.optlen;    in do_ip_setsockopt()
   648  icsk->icsk_ext_hdr_len += opt->opt.optlen;    in do_ip_setsockopt()
   649  icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);    in do_ip_setsockopt()
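The ip_sockglue.c matches (and ipv6_sockglue.c further down) show the pattern used when IP
options change on a connected socket: fold the option length into icsk_ext_hdr_len and ask
the transport to recompute its MSS through the icsk_sync_mss hook, keeping the cached
path-MTU cookie. A hedged sketch of that pattern follows; the helper name and its optlen
parameters are hypothetical.

#include <net/inet_connection_sock.h>

/* Hypothetical helper mirroring the do_ip_setsockopt() pattern above:
 * adjust the cached extension-header length and let the protocol
 * (tcp_sync_mss / dccp_sync_mss via icsk_sync_mss) recompute its MSS
 * for the unchanged path-MTU cookie.
 */
static void resync_mss_after_options(struct sock *sk, int old_optlen, int new_optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ext_hdr_len -= old_optlen;
	icsk->icsk_ext_hdr_len += new_optlen;
	if (icsk->icsk_sync_mss)
		icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}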
tcp_metrics.c
   385  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_update_metrics() (local)
   400  if (icsk->icsk_backoff || !tp->srtt_us) {    in tcp_update_metrics()
   465  icsk->icsk_ca_state == TCP_CA_Open) {    in tcp_update_metrics()
/linux-4.1.27/include/net/
inet_connection_sock.h
   196  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_clear_xmit_timer() (local)
   199  icsk->icsk_pending = 0;    in inet_csk_clear_xmit_timer()
   201  sk_stop_timer(sk, &icsk->icsk_retransmit_timer);    in inet_csk_clear_xmit_timer()
   204  icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;    in inet_csk_clear_xmit_timer()
   206  sk_stop_timer(sk, &icsk->icsk_delack_timer);    in inet_csk_clear_xmit_timer()
   223  struct inet_connection_sock *icsk = inet_csk(sk);    in inet_csk_reset_xmit_timer() (local)
   235  icsk->icsk_pending = what;    in inet_csk_reset_xmit_timer()
   236  icsk->icsk_timeout = jiffies + when;    in inet_csk_reset_xmit_timer()
   237  sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);    in inet_csk_reset_xmit_timer()
   239  icsk->icsk_ack.pending |= ICSK_ACK_TIMER;    in inet_csk_reset_xmit_timer()
  [all …]
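The header matches show the two helpers used throughout the files above:
inet_csk_reset_xmit_timer() records the pending event in icsk_pending, sets
icsk_timeout = jiffies + when (or flags ICSK_ACK_TIMER for the delayed-ACK case) and
re-arms the timer, while inet_csk_clear_xmit_timer() stops it. A hedged kernel-context
sketch of arming and cancelling the retransmit timer, using TCP_RTO_MAX as the clamp the
TCP callers pass:

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* Arm the retransmit timer for one RTO, clamped to TCP_RTO_MAX. */
static void arm_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, TCP_RTO_MAX);
}

/* Cancel it again, e.g. once everything outstanding has been ACKed. */
static void stop_retransmit_timer(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
}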
tcp.h
   382  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_dec_quickack_mode() (local)
   384  if (icsk->icsk_ack.quick) {    in tcp_dec_quickack_mode()
   385  if (pkts >= icsk->icsk_ack.quick) {    in tcp_dec_quickack_mode()
   386  icsk->icsk_ack.quick = 0;    in tcp_dec_quickack_mode()
   388  icsk->icsk_ack.ato = TCP_ATO_MIN;    in tcp_dec_quickack_mode()
   390  icsk->icsk_ack.quick -= pkts;    in tcp_dec_quickack_mode()
   873  const struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_ca_needs_ecn() (local)
   875  return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;    in tcp_ca_needs_ecn()
   880  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_set_ca_state() (local)
   882  if (icsk->icsk_ca_ops->set_state)    in tcp_set_ca_state()
  [all …]
/linux-4.1.27/net/ipv6/
inet6_connection_sock.c
   123  struct inet_connection_sock *icsk = inet_csk(sk);    in inet6_csk_search_req() (local)
   124  struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;    in inet6_csk_search_req()
   129  spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);    in inet6_csk_search_req()
   143  spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);    in inet6_csk_search_req()
   153  struct inet_connection_sock *icsk = inet_csk(sk);    in inet6_csk_reqsk_queue_hash_add() (local)
   154  struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;    in inet6_csk_reqsk_queue_hash_add()
   159  reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);    in inet6_csk_reqsk_queue_hash_add()
tcp_ipv6.c
   119  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v6_connect() (local)
   195  u32 exthdrlen = icsk->icsk_ext_hdr_len;    in tcp_v6_connect()
   207  icsk->icsk_af_ops = &ipv6_mapped;    in tcp_v6_connect()
   216  icsk->icsk_ext_hdr_len = exthdrlen;    in tcp_v6_connect()
   217  icsk->icsk_af_ops = &ipv6_specific;    in tcp_v6_connect()
   269  icsk->icsk_ext_hdr_len = 0;    in tcp_v6_connect()
   271  icsk->icsk_ext_hdr_len = opt->opt_flen +    in tcp_v6_connect()
  1626  struct inet_connection_sock *icsk = inet_csk(sk);    in tcp_v6_init_sock() (local)
  1630  icsk->icsk_af_ops = &ipv6_specific;    in tcp_v6_init_sock()
  1686  const struct inet_connection_sock *icsk = inet_csk(sp);    in get_tcp6_sock() (local)
  [all …]
ipv6_sockglue.c
   109  struct inet_connection_sock *icsk = inet_csk(sk);    in ipv6_update_options() (local)
   110  icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;    in ipv6_update_options()
   111  icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);    in ipv6_update_options()
   212  struct inet_connection_sock *icsk = inet_csk(sk);    in do_ipv6_setsockopt() (local)
   218  icsk->icsk_af_ops = &ipv4_specific;    in do_ipv6_setsockopt()
   221  tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);    in do_ipv6_setsockopt()
syncookies.c
    48  struct inet_connection_sock *icsk = inet_csk(sk);    in get_cookie_sock() (local)
    51  child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);    in get_cookie_sock()
/linux-4.1.27/include/linux/
inet_diag.h
    30  int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,