
Searched refs:inet_csk (Results 1 – 36 of 36) sorted by relevance

/linux-4.4.14/include/net/
inet_connection_sock.h
147 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() function
154 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
176 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
181 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
186 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
198 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
225 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
284 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
289 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
294 return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_young()
[all …]
tcp.h
382 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
625 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
626 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
913 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
920 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ca_state()
929 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1017 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1099 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1106 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; in tcp_probe0_when()
1113 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
[all …]
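
Every hit in this listing goes through the accessor defined at inet_connection_sock.h:147: inet_csk() simply reinterprets a struct sock pointer as the enclosing struct inet_connection_sock, which is safe because the connection sock embeds struct inet_sock, which in turn begins with struct sock. The standalone C sketch below mirrors that layout with drastically simplified structs; the field set and sizes are illustrative, not the kernel's.

/* Standalone sketch, not the kernel headers: simplified structs that mirror
 * the layout inet_csk() relies on.  Because struct sock is effectively the
 * first member (via struct inet_sock), the cast in inet_csk() is just a
 * reinterpretation of the same pointer. */
#include <stdio.h>

struct sock { int sk_state; };                    /* drastically simplified */
struct inet_sock { struct sock sk; };             /* first member: struct sock */
struct inet_connection_sock {
	struct inet_sock icsk_inet;               /* first member: struct inet_sock */
	unsigned long    icsk_rto;                /* retransmission timeout, as in the tcp.h hits */
	unsigned char    icsk_backoff;
};

/* Same shape as the inline at inet_connection_sock.h:147 */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

int main(void)
{
	struct inet_connection_sock conn = { .icsk_rto = 200, .icsk_backoff = 0 };
	struct sock *sk = &conn.icsk_inet.sk;     /* what most call sites hold */

	/* The pattern repeated throughout the hits above */
	printf("icsk_rto = %lu\n", inet_csk(sk)->icsk_rto);
	return 0;
}

The same inet_csk(sk)->field access is what the tcp.h hits show, for example clamping icsk_rto against TCP_RTO_MAX in tcp_bound_rto().
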
/linux-4.4.14/net/ipv4/
inet_connection_sock.c
140 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { in inet_csk_get_port()
201 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { in inet_csk_get_port()
238 if (!inet_csk(sk)->icsk_bind_hash) in inet_csk_get_port()
240 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); in inet_csk_get_port()
257 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_wait_for_connect()
305 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_accept()
374 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_init_xmit_timers()
387 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers()
542 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { in inet_csk_reqsk_queue_drop()
543 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_reqsk_queue_drop()
[all …]
tcp_timer.c
139 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
161 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
222 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
274 inet_csk(sk)->icsk_ack.blocked = 1; in tcp_delack_timer()
286 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
336 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
366 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
504 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler()
577 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer()
tcp_cong.c
155 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_assign_congestion_control()
184 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_congestion_control()
197 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_reinit_congestion_control()
210 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_congestion_control()
340 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_congestion_control()
tcp_output.c
74 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
164 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
829 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
906 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_transmit_skb()
1288 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1329 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1351 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1389 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1424 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
[all …]
tcp_fastopen.c
133 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_fastopen_create_child()
142 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_fastopen_create_child()
175 inet_csk(child)->icsk_af_ops->rebuild_header(child); in tcp_fastopen_create_child()
230 fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_queue_check()
tcp_input.c
134 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
180 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
191 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
203 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
351 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
381 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
447 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
478 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
548 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) in tcp_rcv_rtt_measure_ts()
635 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv()
[all …]
tcp_dctcp.c
108 inet_csk(sk)->icsk_ca_ops = &dctcp_reno; in dctcp_init()
196 acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; in dctcp_update_alpha()
297 if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { in dctcp_get_info()
tcp_minisocks.c
269 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
411 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_openreq_child()
453 struct inet_connection_sock *newicsk = inet_csk(newsk); in tcp_create_openreq_child()
755 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && in tcp_check_req()
768 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_check_req()
inet_hashtables.c
97 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
112 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
115 inet_csk(sk)->icsk_bind_hash = NULL; in __inet_put_port()
139 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
595 tb = inet_csk(sk)->icsk_bind_hash; in __inet_hash_connect()
tcp_recovery.c
28 if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced) in tcp_rack_mark_lost()
tcp_htcp.c
84 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_rtt()
104 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_achieved_throughput()
tcp_ipv4.c
209 inet_csk(sk)->icsk_ext_hdr_len = 0; in tcp_v4_connect()
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_connect()
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in tcp_v4_mtu_reduced()
410 icsk = inet_csk(sk); in tcp_v4_err()
1292 inet_csk(newsk)->icsk_ext_hdr_len = 0; in tcp_v4_syn_recv_sock()
1294 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_syn_recv_sock()
1773 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v4_init_sock()
1813 if (inet_csk(sk)->icsk_bind_hash) in tcp_v4_destroy_sock()
1864 icsk = inet_csk(sk); in listening_get_next()
2161 const struct inet_connection_sock *icsk = inet_csk(sk); in get_tcp4_sock()
tcp.c
382 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
1397 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_rbuf()
1925 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
2204 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
2327 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
2613 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_setsockopt()
2638 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_info()
2727 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_getsockopt()
2917 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_getsockopt()
tcp_yeah.c
61 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_yeah_pkts_acked()
tcp_bic.c
202 const struct inet_connection_sock *icsk = inet_csk(sk); in bictcp_acked()
inet_timewait_sock.c
105 const struct inet_connection_sock *icsk = inet_csk(sk); in __inet_twsk_hashdance()
tcp_metrics.c
369 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_update_metrics()
554 inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk)); in tcp_init_metrics()
565 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; in tcp_init_metrics()
syncookies.c
222 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_cookie_sock()
cipso_ipv4.c
1938 sk_conn = inet_csk(sk); in cipso_v4_sock_setattr()
2110 struct inet_connection_sock *sk_conn = inet_csk(sk); in cipso_v4_sock_delattr()
inet_diag.c
261 return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, in inet_csk_diag_fill()
ip_sockglue.c
639 struct inet_connection_sock *icsk = inet_csk(sk); in do_ip_setsockopt()
af_inet.c
222 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { in inet_listen()
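
The congestion-control hits above (tcp_cong.c, tcp_dctcp.c, tcp_yeah.c, tcp_bic.c, tcp_htcp.c) all rely on the per-socket scratch area exposed by inet_csk_ca() at inet_connection_sock.h:154: each module overlays its own private state struct on icsk_ca_priv. Below is a standalone sketch of that pattern; the blob size and the module struct are illustrative stand-ins, not the kernel's definitions.

/* Standalone sketch of the icsk_ca_priv pattern: the connection socket
 * reserves a fixed-size, typeless scratch area and the active congestion
 * control module reinterprets it as its own state.  Size and the module
 * struct below are illustrative only. */
#include <stdio.h>
#include <string.h>

#define CA_PRIV_WORDS 16

struct icsk_sketch {
	unsigned long icsk_ca_priv[CA_PRIV_WORDS];   /* module-private scratch space */
};

/* Mirrors inet_csk_ca(): hand the module a void * view of the blob */
static inline void *inet_csk_ca_sketch(struct icsk_sketch *icsk)
{
	return (void *)icsk->icsk_ca_priv;
}

/* Hypothetical per-socket state in the spirit of tcp_dctcp.c */
struct dctcp_like {
	unsigned int acked_bytes_total;
	unsigned int alpha;
};

int main(void)
{
	struct icsk_sketch icsk;
	struct dctcp_like *ca;

	memset(&icsk, 0, sizeof(icsk));
	ca = inet_csk_ca_sketch(&icsk);           /* what ca_ops callbacks do on entry */
	ca->alpha = 1024;                         /* module state lives inside the socket itself */
	printf("alpha = %u, priv bytes = %zu\n", ca->alpha, sizeof(icsk.icsk_ca_priv));
	return 0;
}
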
/linux-4.4.14/net/dccp/
timer.c
37 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_write_timeout()
89 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_retransmit_timer()
131 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_write_timer()
176 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_delack_timer()
output.c
49 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_transmit_skb()
164 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_sync_mss()
271 inet_csk(sk)->icsk_rto, in dccp_xmit_packet()
384 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) in dccp_retransmit_skb()
388 inet_csk(sk)->icsk_retransmits++; in dccp_retransmit_skb()
515 int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); in dccp_send_reset()
540 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_connect()
584 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in dccp_send_ack()
604 struct inet_connection_sock *icsk = inet_csk(sk);
minisocks.c
42 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_time_wait()
89 struct inet_connection_sock *newicsk = inet_csk(newsk); in dccp_create_openreq_child()
186 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in dccp_check_req()
ipv6.c
146 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) in dccp_v6_err()
418 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; in dccp_v6_request_recv_sock()
435 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); in dccp_v6_request_recv_sock()
502 inet_csk(newsk)->icsk_ext_hdr_len = 0; in dccp_v6_request_recv_sock()
504 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + in dccp_v6_request_recv_sock()
763 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_v6_connect()
967 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; in dccp_v6_init_sock()
proto.c
100 if (inet_csk(sk)->icsk_bind_hash != NULL && in dccp_set_state()
176 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_init_sock()
214 if (inet_csk(sk)->icsk_bind_hash != NULL) in dccp_destroy_sock()
253 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_disconnect()
561 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, in dccp_setsockopt()
682 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level, in dccp_getsockopt()
diag.c
22 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_get_info()
ipv4.c
96 inet_csk(sk)->icsk_ext_hdr_len = 0; in dccp_v4_connect()
98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in dccp_v4_connect()
176 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in dccp_do_pmtu_discovery()
919 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; in dccp_v4_init_sock()
input.c
405 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_rcv_request_sent_state_process()
606 if (inet_csk(sk)->icsk_af_ops->conn_request(sk, in dccp_rcv_state_process()
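
Many of the DCCP and TCP hits (rebuild_header, syn_recv_sock, conn_request, setsockopt/getsockopt) are dispatched through inet_csk(sk)->icsk_af_ops, the address-family operations table hanging off the connection socket. The sketch below shows only the shape of that indirection; the ops struct and its initialization here are stand-ins, not the kernel's struct inet_connection_sock_af_ops.

/* Standalone sketch of the icsk_af_ops indirection seen in the hits above:
 * protocol code calls address-family specific helpers through a function
 * pointer table on the connection socket.  Names are invented for
 * illustration. */
#include <stdio.h>

struct sock;                                      /* opaque in this sketch */

struct af_ops_sketch {
	int (*rebuild_header)(struct sock *sk);
	int (*conn_request)(struct sock *sk);
};

struct icsk_sketch {
	const struct af_ops_sketch *icsk_af_ops;  /* set at init time, e.g. to an IPv4 or IPv6 table */
};

static int ipv4_rebuild_header(struct sock *sk) { (void)sk; puts("ipv4 rebuild_header"); return 0; }
static int ipv4_conn_request(struct sock *sk)   { (void)sk; puts("ipv4 conn_request");   return 0; }

static const struct af_ops_sketch ipv4_ops = {
	.rebuild_header = ipv4_rebuild_header,
	.conn_request   = ipv4_conn_request,
};

int main(void)
{
	struct icsk_sketch icsk = { .icsk_af_ops = &ipv4_ops };

	/* Mirrors calls like inet_csk(sk)->icsk_af_ops->rebuild_header(sk) */
	icsk.icsk_af_ops->rebuild_header(NULL);
	icsk.icsk_af_ops->conn_request(NULL);
	return 0;
}
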
/linux-4.4.14/net/core/
request_sock.c
103 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
/linux-4.4.14/net/ipv6/
tcp_ipv6.c
118 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_connect()
314 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { in tcp_v6_mtu_reduced()
1012 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; in tcp_v6_syn_recv_sock()
1038 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); in tcp_v6_syn_recv_sock()
1112 inet_csk(newsk)->icsk_ext_hdr_len = 0; in tcp_v6_syn_recv_sock()
1114 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + in tcp_v6_syn_recv_sock()
1638 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_init_sock()
1699 const struct inet_connection_sock *icsk = inet_csk(sp); in get_tcp6_sock()
ipv6_sockglue.c
109 struct inet_connection_sock *icsk = inet_csk(sk); in ipv6_update_options()
212 struct inet_connection_sock *icsk = inet_csk(sk); in do_ipv6_setsockopt()
/linux-4.4.14/include/linux/
tcp.h
394 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in fastopen_queue_tune()