/linux-4.4.14/include/net/ |
D | inet_connection_sock.h |
    147  static inline struct inet_connection_sock *inet_csk(const struct sock *sk)  in inet_csk() function
    154  return (void *)inet_csk(sk)->icsk_ca_priv;  in inet_csk_ca()
    176  inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;  in inet_csk_schedule_ack()
    181  return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;  in inet_csk_ack_scheduled()
    186  memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));  in inet_csk_delack_init()
    198  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_clear_xmit_timer()
    225  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_reset_xmit_timer()
    284  reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);  in inet_csk_reqsk_queue_added()
    289  return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);  in inet_csk_reqsk_queue_len()
    294  return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);  in inet_csk_reqsk_queue_young()
    [all …]
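The first two hits above are the accessor pair everything else in this list is built on. A minimal sketch, assuming the 4.4-era layout in which struct inet_connection_sock embeds struct inet_sock (and therefore struct sock) as its first member, so the accessor reduces to a pointer cast:

    /* Sketch of the line-147 accessor: valid only because struct sock
     * sits at the start of the inet_connection_sock layout.
     */
    static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
    {
    	return (struct inet_connection_sock *)sk;
    }

    /* Line 154: congestion-control modules get private scratch space
     * inside the icsk, handed out as an opaque pointer.
     */
    static inline void *inet_csk_ca(const struct sock *sk)
    {
    	return (void *)inet_csk(sk)->icsk_ca_priv;
    }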
|
D | tcp.h |
    382  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_dec_quickack_mode()
    625  if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)  in tcp_bound_rto()
    626  inet_csk(sk)->icsk_rto = TCP_RTO_MAX;  in tcp_bound_rto()
    913  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_ca_needs_ecn()
    920  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_set_ca_state()
    929  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_ca_event()
    1017  (1 << inet_csk(sk)->icsk_ca_state);  in tcp_in_cwnd_reduction()
    1099  return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);  in tcp_probe0_base()
    1106  u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;  in tcp_probe0_when()
    1113  if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)  in tcp_check_probe_timer()
    [all …]
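Two helpers here are visible in full across their hits and show how icsk fields feed TCP's timers. Reassembled from the lines shown above (625–626 and 1099); the function shapes are sketches, not complete copies of the header:

    /* Lines 625-626: clamp the retransmission timeout at its ceiling. */
    static inline void tcp_bound_rto(const struct sock *sk)
    {
    	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
    		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
    }

    /* Line 1099: the zero-window probe timer starts from the RTO,
     * floored at TCP_RTO_MIN; line 1106 then left-shifts this base by
     * icsk_backoff for exponential backoff.
     */
    static inline unsigned long tcp_probe0_base(const struct sock *sk)
    {
    	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
    }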
|
/linux-4.4.14/net/ipv4/ |
D | inet_connection_sock.c |
    140  if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {  in inet_csk_get_port()
    201  if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {  in inet_csk_get_port()
    238  if (!inet_csk(sk)->icsk_bind_hash)  in inet_csk_get_port()
    240  WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);  in inet_csk_get_port()
    257  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_wait_for_connect()
    305  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_accept()
    374  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_init_xmit_timers()
    387  struct inet_connection_sock *icsk = inet_csk(sk);  in inet_csk_clear_xmit_timers()
    542  if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {  in inet_csk_reqsk_queue_drop()
    543  reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);  in inet_csk_reqsk_queue_drop()
    [all …]
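A shape that recurs through this file (and the DCCP hits further down) is dispatch through the per-address-family ops table: connection-oriented code stays IPv4/IPv6-agnostic by calling through icsk_af_ops. A hypothetical caller as illustration; example_rebuild is not a kernel function:

    /* The bind_conflict and rebuild_header hits all follow this pattern:
     * the icsk carries a pointer to an af-specific operations table, so
     * common code never hard-codes the address family.
     */
    static int example_rebuild(struct sock *sk)
    {
    	return inet_csk(sk)->icsk_af_ops->rebuild_header(sk);
    }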
|
D | tcp_timer.c |
    139  if (!inet_csk(sk)->icsk_retransmits)  in retransmits_timed_out()
    161  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_write_timeout()
    222  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_delack_timer_handler()
    274  inet_csk(sk)->icsk_ack.blocked = 1;  in tcp_delack_timer()
    286  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_probe_timer()
    336  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_fastopen_synack_timer()
    366  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_retransmit_timer()
    504  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_write_timer_handler()
    577  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_keepalive_timer()
|
D | tcp_cong.c |
    155  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_assign_congestion_control()
    184  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_congestion_control()
    197  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_reinit_congestion_control()
    210  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_congestion_control()
    340  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_set_congestion_control()
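The congestion-control core here wires algorithm modules to the socket's ca_ops pointer, and each module keeps its per-socket state in the icsk_ca_priv area reached via inet_csk_ca(). A hypothetical minimal module state, sketching the pattern the per-algorithm files below (tcp_bic.c, tcp_yeah.c, tcp_dctcp.c) all follow; demo_ca and demo_init are made-up names:

    /* Hypothetical private state; real modules must keep this within
     * ICSK_CA_PRIV_SIZE bytes, since it lives inside the icsk itself.
     */
    struct demo_ca {
    	u32 acked_count;	/* made-up counter, for illustration */
    };

    static void demo_init(struct sock *sk)
    {
    	struct demo_ca *ca = inet_csk_ca(sk);	/* points into icsk_ca_priv */

    	ca->acked_count = 0;
    }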
|
D | tcp_output.c |
    74  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_new_data_sent()
    153  while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)  in tcp_cwnd_restart()
    164  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_data_sent()
    829  inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);  in tcp_release_cb()
    906  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_transmit_skb()
    1288  const struct inet_connection_sock *icsk = inet_csk(sk);  in __tcp_mtu_to_mss()
    1329  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_mss_to_mtu()
    1351  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_mtup_init()
    1389  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_sync_mss()
    1424  if (mtu != inet_csk(sk)->icsk_pmtu_cookie)  in tcp_current_mss()
    [all …]
|
D | tcp_fastopen.c |
    133  struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;  in tcp_fastopen_create_child()
    142  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,  in tcp_fastopen_create_child()
    175  inet_csk(child)->icsk_af_ops->rebuild_header(child);  in tcp_fastopen_create_child()
    230  fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;  in tcp_fastopen_queue_check()
|
D | tcp_input.c |
    134  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_measure_rcv_mss()
    180  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_incr_quickack()
    191  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_enter_quickack_mode()
    203  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_in_quickack_mode()
    351  return 2 * inet_csk(sk)->icsk_ack.rcv_mss;  in __tcp_grow_window()
    381  inet_csk(sk)->icsk_ack.quick |= 1;  in tcp_grow_window()
    447  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_clamp_window()
    478  inet_csk(sk)->icsk_ack.rcv_mss = hint;  in tcp_initialize_rcv_mss()
    548  TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))  in tcp_rcv_rtt_measure_ts()
    635  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_event_data_recv()
    [all …]
|
D | tcp_dctcp.c |
    108  inet_csk(sk)->icsk_ca_ops = &dctcp_reno;  in dctcp_init()
    196  acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;  in dctcp_update_alpha()
    297  if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {  in dctcp_get_info()
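The hit at line 108 is DCTCP's fallback: when a connection cannot use DCTCP (it depends on ECN), dctcp_init() swaps the socket's ops pointer to the Reno-like dctcp_reno table in place, degrading that one socket to standard behaviour. A hypothetical condensation; demo_fallback is not a kernel function and the triggering condition is elided here:

    static void demo_fallback(struct sock *sk)
    {
    	/* Grounded in the line-108 hit: replace the ca_ops pointer for
    	 * this socket only; dctcp_reno is defined in tcp_dctcp.c.
    	 */
    	inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
    }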
|
D | tcp_minisocks.c |
    269  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_time_wait()
    411  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_ca_openreq_child()
    453  struct inet_connection_sock *newicsk = inet_csk(newsk);  in tcp_create_openreq_child()
    755  if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&  in tcp_check_req()
    768  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,  in tcp_check_req()
|
D | inet_hashtables.c |
    97  inet_csk(sk)->icsk_bind_hash = tb;  in inet_bind_hash()
    112  tb = inet_csk(sk)->icsk_bind_hash;  in __inet_put_port()
    115  inet_csk(sk)->icsk_bind_hash = NULL;  in __inet_put_port()
    139  tb = inet_csk(sk)->icsk_bind_hash;  in __inet_inherit_port()
    595  tb = inet_csk(sk)->icsk_bind_hash;  in __inet_hash_connect()
|
D | tcp_recovery.c | 28 if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) in tcp_rack_mark_lost()
|
D | tcp_htcp.c |
    84  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_rtt()
    104  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_achieved_throughput()
|
D | tcp_ipv4.c |
    209  inet_csk(sk)->icsk_ext_hdr_len = 0;  in tcp_v4_connect()
    211  inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;  in tcp_v4_connect()
    292  inet_csk(sk)->icsk_pmtu_cookie > mtu) {  in tcp_v4_mtu_reduced()
    410  icsk = inet_csk(sk);  in tcp_v4_err()
    1292  inet_csk(newsk)->icsk_ext_hdr_len = 0;  in tcp_v4_syn_recv_sock()
    1294  inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;  in tcp_v4_syn_recv_sock()
    1773  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v4_init_sock()
    1813  if (inet_csk(sk)->icsk_bind_hash)  in tcp_v4_destroy_sock()
    1864  icsk = inet_csk(sk);  in listening_get_next()
    2161  const struct inet_connection_sock *icsk = inet_csk(sk);  in get_tcp4_sock()
|
D | tcp.c |
    382  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_init_sock()
    1397  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_rbuf()
    1925  if (inet_csk(sk)->icsk_bind_hash &&  in tcp_set_state()
    2204  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_disconnect()
    2327  struct inet_connection_sock *icsk = inet_csk(sk);  in do_tcp_setsockopt()
    2613  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_setsockopt()
    2638  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_get_info()
    2727  struct inet_connection_sock *icsk = inet_csk(sk);  in do_tcp_getsockopt()
    2917  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_getsockopt()
|
D | tcp_yeah.c | 61 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_yeah_pkts_acked()
|
D | tcp_bic.c | 202 const struct inet_connection_sock *icsk = inet_csk(sk); in bictcp_acked()
|
D | inet_timewait_sock.c | 105 const struct inet_connection_sock *icsk = inet_csk(sk); in __inet_twsk_hashdance()
|
D | tcp_metrics.c |
    369  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_update_metrics()
    554  inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));  in tcp_init_metrics()
    565  inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;  in tcp_init_metrics()
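Lines 554 and 565 show the two ways a new connection seeds its RTO: from a cached RTT sample when the metrics cache has one, padded by the larger of twice that sample and the per-destination RTO floor, or a static fallback constant when the cache is empty. A hypothetical condensation; demo_seed_rto, have_cached_rtt, and crtt stand in for the surrounding tcp_init_metrics() logic:

    static void demo_seed_rto(struct sock *sk, bool have_cached_rtt, u32 crtt)
    {
    	if (have_cached_rtt)	/* line 554 */
    		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
    	else			/* line 565 */
    		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
    }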
|
D | syncookies.c | 222 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_cookie_sock()
|
D | cipso_ipv4.c |
    1938  sk_conn = inet_csk(sk);  in cipso_v4_sock_setattr()
    2110  struct inet_connection_sock *sk_conn = inet_csk(sk);  in cipso_v4_sock_delattr()
|
D | inet_diag.c | 261 return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, in inet_csk_diag_fill()
|
D | ip_sockglue.c | 639 struct inet_connection_sock *icsk = inet_csk(sk); in do_ip_setsockopt()
|
D | af_inet.c | 222 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { in inet_listen()
|
/linux-4.4.14/net/dccp/ |
D | timer.c |
    37  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_write_timeout()
    89  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_retransmit_timer()
    131  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_write_timer()
    176  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_delack_timer()
|
D | output.c |
    49  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_transmit_skb()
    164  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_sync_mss()
    271  inet_csk(sk)->icsk_rto,  in dccp_xmit_packet()
    384  if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)  in dccp_retransmit_skb()
    388  inet_csk(sk)->icsk_retransmits++;  in dccp_retransmit_skb()
    515  int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);  in dccp_send_reset()
    540  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_connect()
    584  inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;  in dccp_send_ack()
    604  struct inet_connection_sock *icsk = inet_csk(sk);
|
D | minisocks.c |
    42  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_time_wait()
    89  struct inet_connection_sock *newicsk = inet_csk(newsk);  in dccp_create_openreq_child()
    186  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,  in dccp_check_req()
|
D | ipv6.c |
    146  if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))  in dccp_v6_err()
    418  inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;  in dccp_v6_request_recv_sock()
    435  dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);  in dccp_v6_request_recv_sock()
    502  inet_csk(newsk)->icsk_ext_hdr_len = 0;  in dccp_v6_request_recv_sock()
    504  inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +  in dccp_v6_request_recv_sock()
    763  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_v6_connect()
    967  inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;  in dccp_v6_init_sock()
|
D | proto.c |
    100  if (inet_csk(sk)->icsk_bind_hash != NULL &&  in dccp_set_state()
    176  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_init_sock()
    214  if (inet_csk(sk)->icsk_bind_hash != NULL)  in dccp_destroy_sock()
    253  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_disconnect()
    561  return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,  in dccp_setsockopt()
    682  return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,  in dccp_getsockopt()
|
D | diag.c | 22 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_get_info()
|
D | ipv4.c |
    96  inet_csk(sk)->icsk_ext_hdr_len = 0;  in dccp_v4_connect()
    98  inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;  in dccp_v4_connect()
    176  inet_csk(sk)->icsk_pmtu_cookie > mtu) {  in dccp_do_pmtu_discovery()
    919  inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;  in dccp_v4_init_sock()
|
D | input.c |
    405  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_rcv_request_sent_state_process()
    606  if (inet_csk(sk)->icsk_af_ops->conn_request(sk,  in dccp_rcv_state_process()
|
/linux-4.4.14/net/core/ |
D | request_sock.c | 103 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
|
/linux-4.4.14/net/ipv6/ |
D | tcp_ipv6.c |
    118  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_connect()
    314  if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {  in tcp_v6_mtu_reduced()
    1012  inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;  in tcp_v6_syn_recv_sock()
    1038  tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);  in tcp_v6_syn_recv_sock()
    1112  inet_csk(newsk)->icsk_ext_hdr_len = 0;  in tcp_v6_syn_recv_sock()
    1114  inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +  in tcp_v6_syn_recv_sock()
    1638  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_v6_init_sock()
    1699  const struct inet_connection_sock *icsk = inet_csk(sp);  in get_tcp6_sock()
|
D | ipv6_sockglue.c |
    109  struct inet_connection_sock *icsk = inet_csk(sk);  in ipv6_update_options()
    212  struct inet_connection_sock *icsk = inet_csk(sk);  in do_ipv6_setsockopt()
|
/linux-4.4.14/include/linux/ |
D | tcp.h | 394 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in fastopen_queue_tune()
|