icsk 1137 drivers/crypto/chelsio/chtls/chtls_cm.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1139 drivers/crypto/chelsio/chtls/chtls_cm.c if (!try_module_get(icsk->icsk_ulp_ops->owner))
icsk 750 drivers/xen/pvcalls-back.c struct inet_connection_sock *icsk;
icsk 776 drivers/xen/pvcalls-back.c icsk = inet_csk(mappass->sock->sk);
icsk 777 drivers/xen/pvcalls-back.c queue = &icsk->icsk_accept_queue;
icsk 39 include/linux/inet_diag.h int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk 361 include/linux/skmsg.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 362 include/linux/skmsg.h bool has_ulp = !!icsk->icsk_ulp_data;
icsk 197 include/net/inet_connection_sock.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 200 include/net/inet_connection_sock.h icsk->icsk_pending = 0;
icsk 202 include/net/inet_connection_sock.h sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
icsk 205 include/net/inet_connection_sock.h icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
icsk 207 include/net/inet_connection_sock.h sk_stop_timer(sk, &icsk->icsk_delack_timer);
icsk 221 include/net/inet_connection_sock.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 232 include/net/inet_connection_sock.h icsk->icsk_pending = what;
icsk 233 include/net/inet_connection_sock.h icsk->icsk_timeout = jiffies + when;
icsk 234 include/net/inet_connection_sock.h sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
icsk 236 include/net/inet_connection_sock.h icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
icsk 237 include/net/inet_connection_sock.h icsk->icsk_ack.timeout = jiffies + when;
icsk 238 include/net/inet_connection_sock.h sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
icsk 245 include/net/inet_connection_sock.h inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
icsk 248 include/net/inet_connection_sock.h u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
icsk 333 include/net/inet_connection_sock.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 335 include/net/inet_connection_sock.h if (icsk->icsk_ack.pingpong < U8_MAX)
icsk 336 include/net/inet_connection_sock.h icsk->icsk_ack.pingpong++;
icsk 349 include/net/tcp.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 351 include/net/tcp.h if (icsk->icsk_ack.quick) {
icsk 352 include/net/tcp.h if (pkts >= icsk->icsk_ack.quick) {
icsk 353 include/net/tcp.h icsk->icsk_ack.quick = 0;
icsk 355 include/net/tcp.h icsk->icsk_ack.ato = TCP_ATO_MIN;
icsk 357 include/net/tcp.h icsk->icsk_ack.quick -= pkts;
icsk 1111 include/net/tcp.h const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1113 include/net/tcp.h return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
icsk 1118 include/net/tcp.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1120 include/net/tcp.h if (icsk->icsk_ca_ops->set_state)
icsk 1121 include/net/tcp.h icsk->icsk_ca_ops->set_state(sk, ca_state);
icsk 1122 include/net/tcp.h icsk->icsk_ca_state = ca_state;
icsk 1127 include/net/tcp.h const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1129 include/net/tcp.h if (icsk->icsk_ca_ops->cwnd_event)
icsk 1130 include/net/tcp.h icsk->icsk_ca_ops->cwnd_event(sk, event);
icsk 1447 include/net/tcp.h const struct inet_connection_sock *icsk = &tp->inet_conn;
icsk 1449 include/net/tcp.h return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
icsk 2285 include/net/tcp.h void clean_acked_data_enable(struct inet_connection_sock *icsk,
icsk 2287 include/net/tcp.h void clean_acked_data_disable(struct inet_connection_sock *icsk);
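The include/net/inet_connection_sock.h entries at lines 197-238 above are the helpers that clear and arm the retransmit and delayed-ACK timers: arming records which event is pending in icsk_pending, stores an absolute expiry in icsk_timeout, and (re)starts the timer with sk_reset_timer(). A minimal userspace sketch of that bookkeeping, with stand-in types (jiffies_t, struct icsk_model, and the event constant are hypothetical, not kernel API):

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    struct icsk_model {
        int pending;          /* which xmit event is armed; 0 = none */
        jiffies_t timeout;    /* absolute expiry, like icsk_timeout */
    };

    /* mirrors inet_csk_reset_xmit_timer(): record event, compute expiry */
    static void reset_xmit_timer(struct icsk_model *icsk, int what,
                                 jiffies_t now, jiffies_t when)
    {
        icsk->pending = what;
        icsk->timeout = now + when;  /* kernel: jiffies + when, then sk_reset_timer() */
    }

    /* mirrors inet_csk_clear_xmit_timer(): forget the event, stop the timer */
    static void clear_xmit_timer(struct icsk_model *icsk)
    {
        icsk->pending = 0;
    }

    int main(void)
    {
        struct icsk_model icsk = {0};
        reset_xmit_timer(&icsk, 1 /* e.g. a retransmit event */, 1000, 200);
        printf("pending=%d timeout=%lu\n", icsk.pending, icsk.timeout);
        clear_xmit_timer(&icsk);
        return 0;
    }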
icsk 483 include/net/tls.h struct inet_connection_sock *icsk = inet_csk(sk);
icsk 488 include/net/tls.h return (__force void *)icsk->icsk_ulp_data;
icsk 4408 net/core/filter.c struct inet_connection_sock *icsk;
icsk 4413 net/core/filter.c icsk = inet_csk(sk);
icsk 4415 net/core/filter.c if (!icsk->icsk_ca_ops || optlen <= 1)
icsk 4417 net/core/filter.c strncpy(optval, icsk->icsk_ca_ops->name, optlen);
icsk 343 net/core/sock_map.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 354 net/core/sock_map.c if (unlikely(rcu_access_pointer(icsk->icsk_ulp_data)))
icsk 667 net/core/sock_map.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 678 net/core/sock_map.c if (unlikely(icsk->icsk_ulp_data))
icsk 19 net/dccp/diag.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 24 net/dccp/diag.c info->tcpi_retransmits = icsk->icsk_retransmits;
icsk 25 net/dccp/diag.c info->tcpi_probes = icsk->icsk_probes_out;
icsk 26 net/dccp/diag.c info->tcpi_backoff = icsk->icsk_backoff;
icsk 27 net/dccp/diag.c info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
icsk 401 net/dccp/input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 443 net/dccp/input.c dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 472 net/dccp/input.c icsk->icsk_af_ops->rebuild_header(sk);
icsk 480 net/dccp/input.c icsk->icsk_accept_queue.rskq_defer_accept) {
icsk 807 net/dccp/ipv6.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 875 net/dccp/ipv6.c u32 exthdrlen = icsk->icsk_ext_hdr_len;
icsk 887 net/dccp/ipv6.c icsk->icsk_af_ops = &dccp_ipv6_mapped;
icsk 892 net/dccp/ipv6.c icsk->icsk_ext_hdr_len = exthdrlen;
icsk 893 net/dccp/ipv6.c icsk->icsk_af_ops = &dccp_ipv6_af_ops;
icsk 932 net/dccp/ipv6.c icsk->icsk_ext_hdr_len = 0;
icsk 934 net/dccp/ipv6.c icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
icsk 38 net/dccp/minisocks.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 39 net/dccp/minisocks.c const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
icsk 46 net/dccp/output.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 73 net/dccp/output.c if (icsk->icsk_retransmits == 0)
icsk 131 net/dccp/output.c icsk->icsk_af_ops->send_check(sk, skb);
icsk 138 net/dccp/output.c err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
icsk 161 net/dccp/output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 167 net/dccp/output.c cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
icsk 187 net/dccp/output.c icsk->icsk_pmtu_cookie = pmtu;
icsk 537 net/dccp/output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 564 net/dccp/output.c icsk->icsk_retransmits = 0;
icsk 566 net/dccp/output.c icsk->icsk_rto, DCCP_RTO_MAX);
icsk 601 net/dccp/output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 610 net/dccp/output.c if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
icsk 616 net/dccp/output.c if (icsk->icsk_ack.blocked) {
icsk 621 net/dccp/output.c if (!time_before(timeout, icsk->icsk_ack.timeout))
icsk 622 net/dccp/output.c timeout = icsk->icsk_ack.timeout;
icsk 624 net/dccp/output.c icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
icsk 625 net/dccp/output.c icsk->icsk_ack.timeout = timeout;
icsk 626 net/dccp/output.c sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
icsk 186 net/dccp/proto.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 188 net/dccp/proto.c icsk->icsk_rto = DCCP_TIMEOUT_INIT;
icsk 189 net/dccp/proto.c icsk->icsk_syn_retries = sysctl_dccp_request_retries;
icsk 193 net/dccp/proto.c icsk->icsk_sync_mss = dccp_sync_mss;
icsk 260 net/dccp/proto.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 299 net/dccp/proto.c icsk->icsk_backoff = 0;
icsk 303 net/dccp/proto.c WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
icsk 33 net/dccp/timer.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 37 net/dccp/timer.c if (icsk->icsk_retransmits != 0)
icsk 39 net/dccp/timer.c retry_until = icsk->icsk_syn_retries ?
icsk 42 net/dccp/timer.c if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
icsk 72 net/dccp/timer.c if (icsk->icsk_retransmits >= retry_until) {
icsk 85 net/dccp/timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 98 net/dccp/timer.c if (icsk->icsk_retransmits == 0)
icsk 106 net/dccp/timer.c if (--icsk->icsk_retransmits == 0)
icsk 107 net/dccp/timer.c icsk->icsk_retransmits = 1;
icsk 109 net/dccp/timer.c min(icsk->icsk_rto,
icsk 115 net/dccp/timer.c icsk->icsk_backoff++;
icsk 117 net/dccp/timer.c icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
icsk 118 net/dccp/timer.c inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
icsk 120 net/dccp/timer.c if (icsk->icsk_retransmits > sysctl_dccp_retries1)
icsk 126 net/dccp/timer.c struct inet_connection_sock *icsk =
icsk 127 net/dccp/timer.c from_timer(icsk, t, icsk_retransmit_timer);
icsk 128 net/dccp/timer.c struct sock *sk = &icsk->icsk_inet.sk;
icsk 134 net/dccp/timer.c sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
icsk 139 net/dccp/timer.c if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
icsk 142 net/dccp/timer.c if (time_after(icsk->icsk_timeout, jiffies)) {
icsk 143 net/dccp/timer.c sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
icsk 144 net/dccp/timer.c icsk->icsk_timeout);
icsk 148 net/dccp/timer.c event = icsk->icsk_pending;
icsk 149 net/dccp/timer.c icsk->icsk_pending = 0;
icsk 172 net/dccp/timer.c struct inet_connection_sock *icsk =
icsk 173 net/dccp/timer.c from_timer(icsk, t, icsk_delack_timer);
icsk 174 net/dccp/timer.c struct sock *sk = &icsk->icsk_inet.sk;
icsk 179 net/dccp/timer.c icsk->icsk_ack.blocked = 1;
icsk 181 net/dccp/timer.c sk_reset_timer(sk, &icsk->icsk_delack_timer,
icsk 187 net/dccp/timer.c !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
icsk 189 net/dccp/timer.c if (time_after(icsk->icsk_ack.timeout, jiffies)) {
icsk 190 net/dccp/timer.c sk_reset_timer(sk, &icsk->icsk_delack_timer,
icsk 191 net/dccp/timer.c icsk->icsk_ack.timeout);
icsk 195 net/dccp/timer.c icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
icsk 200 net/dccp/timer.c icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
icsk 201 net/dccp/timer.c icsk->icsk_rto);
icsk 207 net/dccp/timer.c icsk->icsk_ack.ato = TCP_ATO_MIN;
icsk 396 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 418 net/ipv4/inet_connection_sock.c if (reqsk_queue_empty(&icsk->icsk_accept_queue))
icsk 423 net/ipv4/inet_connection_sock.c if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
icsk 444 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 445 net/ipv4/inet_connection_sock.c struct request_sock_queue *queue = &icsk->icsk_accept_queue;
icsk 533 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 535 net/ipv4/inet_connection_sock.c timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
icsk 536 net/ipv4/inet_connection_sock.c timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
icsk 538 net/ipv4/inet_connection_sock.c icsk->icsk_pending = icsk->icsk_ack.pending = 0;
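The net/dccp/timer.c callbacks above (lines 126-128 and 172-174) recover the enclosing socket from the expiring timer via from_timer(), which is container_of() applied to the embedded timer field. A self-contained sketch of that pointer arithmetic (struct timer and struct icsk_model are stand-ins for the kernel types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int armed; };

    struct icsk_model {
        int state;
        struct timer retransmit_timer;  /* embedded, like icsk_retransmit_timer */
    };

    static void timer_fired(struct timer *t)
    {
        /* mirrors: icsk = from_timer(icsk, t, icsk_retransmit_timer); */
        struct icsk_model *icsk =
            container_of(t, struct icsk_model, retransmit_timer);
        printf("state=%d\n", icsk->state);
    }

    int main(void)
    {
        struct icsk_model m = { .state = 7, .retransmit_timer = { 1 } };
        timer_fired(&m.retransmit_timer);
        return 0;
    }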
icsk 544 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 546 net/ipv4/inet_connection_sock.c icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
icsk 548 net/ipv4/inet_connection_sock.c sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
icsk 549 net/ipv4/inet_connection_sock.c sk_stop_timer(sk, &icsk->icsk_delack_timer);
icsk 715 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk_listener);
icsk 716 net/ipv4/inet_connection_sock.c struct request_sock_queue *queue = &icsk->icsk_accept_queue;
icsk 724 net/ipv4/inet_connection_sock.c max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
icsk 894 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 898 net/ipv4/inet_connection_sock.c reqsk_queue_alloc(&icsk->icsk_accept_queue);
icsk 995 net/ipv4/inet_connection_sock.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 996 net/ipv4/inet_connection_sock.c struct request_sock_queue *queue = &icsk->icsk_accept_queue;
icsk 1054 net/ipv4/inet_connection_sock.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1056 net/ipv4/inet_connection_sock.c if (icsk->icsk_af_ops->compat_getsockopt)
icsk 1057 net/ipv4/inet_connection_sock.c return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
icsk 1059 net/ipv4/inet_connection_sock.c return icsk->icsk_af_ops->getsockopt(sk, level, optname,
icsk 1067 net/ipv4/inet_connection_sock.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1069 net/ipv4/inet_connection_sock.c if (icsk->icsk_af_ops->compat_setsockopt)
icsk 1070 net/ipv4/inet_connection_sock.c return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
icsk 1072 net/ipv4/inet_connection_sock.c return icsk->icsk_af_ops->setsockopt(sk, level, optname,
icsk 173 net/ipv4/inet_diag.c int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk 232 net/ipv4/inet_diag.c if (!icsk) {
icsk 237 net/ipv4/inet_diag.c if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk 238 net/ipv4/inet_diag.c icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk 239 net/ipv4/inet_diag.c icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
icsk 241 net/ipv4/inet_diag.c r->idiag_retrans = icsk->icsk_retransmits;
icsk 243 net/ipv4/inet_diag.c jiffies_to_msecs(icsk->icsk_timeout - jiffies);
icsk 244 net/ipv4/inet_diag.c } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
icsk 246 net/ipv4/inet_diag.c r->idiag_retrans = icsk->icsk_probes_out;
icsk 248 net/ipv4/inet_diag.c jiffies_to_msecs(icsk->icsk_timeout - jiffies);
icsk 251 net/ipv4/inet_diag.c r->idiag_retrans = icsk->icsk_probes_out;
icsk 273 net/ipv4/inet_diag.c ca_ops = READ_ONCE(icsk->icsk_ca_ops);
icsk 293 net/ipv4/inet_diag.c ca_ops = READ_ONCE(icsk->icsk_ca_ops);
icsk 265 net/ipv4/inet_hashtables.c struct inet_connection_sock *icsk;
icsk 270 net/ipv4/inet_hashtables.c inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
icsk 271 net/ipv4/inet_hashtables.c sk = (struct sock *)icsk;
icsk 105 net/ipv4/inet_timewait_sock.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 116 net/ipv4/inet_timewait_sock.c tw->tw_tb = icsk->icsk_bind_hash;
icsk 117 net/ipv4/inet_timewait_sock.c WARN_ON(!icsk->icsk_bind_hash);
icsk 661 net/ipv4/ip_sockglue.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 669 net/ipv4/ip_sockglue.c icsk->icsk_ext_hdr_len -= old->opt.optlen;
icsk 671 net/ipv4/ip_sockglue.c icsk->icsk_ext_hdr_len += opt->opt.optlen;
icsk 672 net/ipv4/ip_sockglue.c icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 205 net/ipv4/syncookies.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 209 net/ipv4/syncookies.c child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
icsk 410 net/ipv4/tcp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 419 net/ipv4/tcp.c icsk->icsk_rto = TCP_TIMEOUT_INIT;
icsk 451 net/ipv4/tcp.c icsk->icsk_sync_mss = tcp_sync_mss;
icsk 1541 net/ipv4/tcp.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1544 net/ipv4/tcp.c if (icsk->icsk_ack.blocked ||
icsk 1546 net/ipv4/tcp.c tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
icsk 1554 net/ipv4/tcp.c ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
icsk 1555 net/ipv4/tcp.c ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
icsk 2572 net/ipv4/tcp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2624 net/ipv4/tcp.c icsk->icsk_backoff = 0;
icsk 2626 net/ipv4/tcp.c icsk->icsk_probes_out = 0;
icsk 2627 net/ipv4/tcp.c icsk->icsk_rto = TCP_TIMEOUT_INIT;
icsk 2642 net/ipv4/tcp.c icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
icsk 2684 net/ipv4/tcp.c WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
icsk 2806 net/ipv4/tcp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3031 net/ipv4/tcp.c icsk->icsk_syn_retries = val;
icsk 3052 net/ipv4/tcp.c icsk->icsk_accept_queue.rskq_defer_accept =
icsk 3077 net/ipv4/tcp.c icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
icsk 3101 net/ipv4/tcp.c icsk->icsk_user_timeout = val;
icsk 3170 net/ipv4/tcp.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3173 net/ipv4/tcp.c return icsk->icsk_af_ops->setsockopt(sk, level, optname,
icsk 3214 net/ipv4/tcp.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3250 net/ipv4/tcp.c info->tcpi_ca_state = icsk->icsk_ca_state;
icsk 3251 net/ipv4/tcp.c info->tcpi_retransmits = icsk->icsk_retransmits;
icsk 3252 net/ipv4/tcp.c info->tcpi_probes = icsk->icsk_probes_out;
icsk 3253 net/ipv4/tcp.c info->tcpi_backoff = icsk->icsk_backoff;
icsk 3272 net/ipv4/tcp.c info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
icsk 3273 net/ipv4/tcp.c info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
icsk 3275 net/ipv4/tcp.c info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
icsk 3285 net/ipv4/tcp.c info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
icsk 3288 net/ipv4/tcp.c info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
icsk 3414 net/ipv4/tcp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3451 net/ipv4/tcp.c val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
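Entries such as "val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;" use the GNU C conditional with an omitted middle operand: "a ? : b" yields a when a is nonzero and b otherwise, evaluating a only once, so a per-socket setting of 0 falls back to the sysctl default. A standalone demonstration (requires gcc or clang, which accept this extension; the function name is hypothetical):

    #include <stdio.h>

    static int effective_syn_retries(int per_socket, int sysctl_default)
    {
        return per_socket ? : sysctl_default;  /* nonzero per-socket value wins */
    }

    int main(void)
    {
        printf("%d\n", effective_syn_retries(0, 6));  /* prints 6: sysctl fallback */
        printf("%d\n", effective_syn_retries(3, 6));  /* prints 3: per-socket value */
        return 0;
    }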
icsk 3459 net/ipv4/tcp.c val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
icsk 3489 net/ipv4/tcp.c ca_ops = icsk->icsk_ca_ops;
icsk 3510 net/ipv4/tcp.c if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
icsk 3518 net/ipv4/tcp.c if (!icsk->icsk_ulp_ops) {
icsk 3525 net/ipv4/tcp.c if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
icsk 3538 net/ipv4/tcp.c ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
icsk 3604 net/ipv4/tcp.c val = icsk->icsk_user_timeout;
icsk 3608 net/ipv4/tcp.c val = icsk->icsk_accept_queue.fastopenq.max_qlen;
icsk 3701 net/ipv4/tcp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3704 net/ipv4/tcp.c return icsk->icsk_af_ops->getsockopt(sk, level, optname,
icsk 191 net/ipv4/tcp_bic.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 193 net/ipv4/tcp_bic.c if (icsk->icsk_ca_state == TCP_CA_Open) {
icsk 160 net/ipv4/tcp_cong.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 167 net/ipv4/tcp_cong.c icsk->icsk_ca_ops = ca;
icsk 170 net/ipv4/tcp_cong.c memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
icsk 179 net/ipv4/tcp_cong.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 182 net/ipv4/tcp_cong.c if (icsk->icsk_ca_ops->init)
icsk 183 net/ipv4/tcp_cong.c icsk->icsk_ca_ops->init(sk);
icsk 193 net/ipv4/tcp_cong.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 196 net/ipv4/tcp_cong.c icsk->icsk_ca_ops = ca;
icsk 197 net/ipv4/tcp_cong.c icsk->icsk_ca_setsockopt = 1;
icsk 198 net/ipv4/tcp_cong.c memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
icsk 207 net/ipv4/tcp_cong.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 209 net/ipv4/tcp_cong.c if (icsk->icsk_ca_ops->release)
icsk 210 net/ipv4/tcp_cong.c icsk->icsk_ca_ops->release(sk);
icsk 211 net/ipv4/tcp_cong.c module_put(icsk->icsk_ca_ops->owner);
icsk 339 net/ipv4/tcp_cong.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 343 net/ipv4/tcp_cong.c if (icsk->icsk_ca_dst_locked)
icsk 353 net/ipv4/tcp_cong.c if (ca == icsk->icsk_ca_ops) {
icsk 354 net/ipv4/tcp_cong.c icsk->icsk_ca_setsockopt = 1;
icsk 361 net/ipv4/tcp_cong.c const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
icsk 367 net/ipv4/tcp_cong.c icsk->icsk_ca_ops = ca;
icsk 115 net/ipv4/tcp_diag.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 135 net/ipv4/tcp_diag.c ulp_ops = icsk->icsk_ulp_ops;
icsk 146 net/ipv4/tcp_diag.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 170 net/ipv4/tcp_diag.c ulp_ops = icsk->icsk_ulp_ops;
icsk 84 net/ipv4/tcp_htcp.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 92 net/ipv4/tcp_htcp.c if (icsk->icsk_ca_state == TCP_CA_Open) {
icsk 104 net/ipv4/tcp_htcp.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 109 net/ipv4/tcp_htcp.c if (icsk->icsk_ca_state == TCP_CA_Open)
icsk 119 net/ipv4/tcp_htcp.c if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {
icsk 118 net/ipv4/tcp_input.c void clean_acked_data_enable(struct inet_connection_sock *icsk,
icsk 121 net/ipv4/tcp_input.c icsk->icsk_clean_acked = cad;
icsk 126 net/ipv4/tcp_input.c void clean_acked_data_disable(struct inet_connection_sock *icsk)
icsk 129 net/ipv4/tcp_input.c icsk->icsk_clean_acked = NULL;
icsk 164 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 165 net/ipv4/tcp_input.c const unsigned int lss = icsk->icsk_ack.last_seg_size;
icsk 168 net/ipv4/tcp_input.c icsk->icsk_ack.last_seg_size = 0;
icsk 174 net/ipv4/tcp_input.c if (len >= icsk->icsk_ack.rcv_mss) {
icsk 175 net/ipv4/tcp_input.c icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
icsk 178 net/ipv4/tcp_input.c if (unlikely(len > icsk->icsk_ack.rcv_mss +
icsk 201 net/ipv4/tcp_input.c icsk->icsk_ack.last_seg_size = len;
icsk 203 net/ipv4/tcp_input.c icsk->icsk_ack.rcv_mss = len;
icsk 207 net/ipv4/tcp_input.c if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
icsk 208 net/ipv4/tcp_input.c icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
icsk 209 net/ipv4/tcp_input.c icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
icsk 215 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 216 net/ipv4/tcp_input.c unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
icsk 221 net/ipv4/tcp_input.c if (quickacks > icsk->icsk_ack.quick)
icsk 222 net/ipv4/tcp_input.c icsk->icsk_ack.quick = quickacks;
icsk 227 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 231 net/ipv4/tcp_input.c icsk->icsk_ack.ato = TCP_ATO_MIN;
icsk 241 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 245 net/ipv4/tcp_input.c (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
icsk 478 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 481 net/ipv4/tcp_input.c icsk->icsk_ack.quick = 0;
icsk 679 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 690 net/ipv4/tcp_input.c if (!icsk->icsk_ack.ato) {
icsk 695 net/ipv4/tcp_input.c icsk->icsk_ack.ato = TCP_ATO_MIN;
icsk 697 net/ipv4/tcp_input.c int m = now - icsk->icsk_ack.lrcvtime;
icsk 701 net/ipv4/tcp_input.c icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
icsk 702 net/ipv4/tcp_input.c } else if (m < icsk->icsk_ack.ato) {
icsk 703 net/ipv4/tcp_input.c icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
icsk 704 net/ipv4/tcp_input.c if (icsk->icsk_ack.ato > icsk->icsk_rto)
icsk 705 net/ipv4/tcp_input.c icsk->icsk_ack.ato = icsk->icsk_rto;
icsk 706 net/ipv4/tcp_input.c } else if (m > icsk->icsk_rto) {
icsk 714 net/ipv4/tcp_input.c icsk->icsk_ack.lrcvtime = now;
icsk 1991 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1994 net/ipv4/tcp_input.c bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
icsk 1999 net/ipv4/tcp_input.c if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
icsk 2001 net/ipv4/tcp_input.c (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
icsk 2004 net/ipv4/tcp_input.c tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
icsk 2015 net/ipv4/tcp_input.c if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
icsk 2028 net/ipv4/tcp_input.c (new_recovery || icsk->icsk_retransmits) &&
icsk 2362 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2364 net/ipv4/tcp_input.c tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
icsk 2568 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2570 net/ipv4/tcp_input.c icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
icsk 2571 net/ipv4/tcp_input.c icsk->icsk_mtup.probe_size = 0;
icsk 2578 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2584 net/ipv4/tcp_input.c icsk->icsk_mtup.probe_size;
icsk 2589 net/ipv4/tcp_input.c icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
icsk 2590 net/ipv4/tcp_input.c icsk->icsk_mtup.probe_size = 0;
icsk 2591 net/ipv4/tcp_input.c tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 2601 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2632 net/ipv4/tcp_input.c if (icsk->icsk_ca_state != TCP_CA_Loss) {
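The net/ipv4/tcp_input.c entries at lines 679-714 maintain icsk_ack.ato, the delayed-ACK timeout, as a clamped moving average of the packet inter-arrival time m. A simplified userspace model of that update rule; ATO_MIN and RTO below are illustrative constants standing in for TCP_ATO_MIN and icsk_rto:

    #include <stdio.h>

    enum { ATO_MIN = 40, RTO = 200 };  /* stand-in values, not the kernel's */

    static unsigned int ato_update(unsigned int ato, unsigned int m)
    {
        if (m <= ATO_MIN / 2) {
            /* rapid arrivals: pull the estimate down toward the minimum */
            ato = (ato >> 1) + ATO_MIN / 2;
        } else if (m < ato) {
            /* average in the new sample, never exceeding the RTO */
            ato = (ato >> 1) + m;
            if (ato > RTO)
                ato = RTO;
        }
        return ato;
    }

    int main(void)
    {
        unsigned int ato = ATO_MIN;
        ato = ato_update(ato, 10);  /* one fast arrival */
        printf("ato=%u\n", ato);
        return 0;
    }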
icsk 2794 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2817 net/ipv4/tcp_input.c if (icsk->icsk_ca_state == TCP_CA_Open) {
icsk 2821 net/ipv4/tcp_input.c switch (icsk->icsk_ca_state) {
icsk 2842 net/ipv4/tcp_input.c switch (icsk->icsk_ca_state) {
icsk 2863 net/ipv4/tcp_input.c if (!(icsk->icsk_ca_state == TCP_CA_Open ||
icsk 2875 net/ipv4/tcp_input.c if (icsk->icsk_ca_state <= TCP_CA_Disorder)
icsk 2885 net/ipv4/tcp_input.c if (icsk->icsk_ca_state < TCP_CA_CWR &&
icsk 2886 net/ipv4/tcp_input.c icsk->icsk_mtup.probe_size &&
icsk 2982 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2984 net/ipv4/tcp_input.c icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
icsk 2993 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3007 net/ipv4/tcp_input.c if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk 3008 net/ipv4/tcp_input.c icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
icsk 3074 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3204 net/ipv4/tcp_input.c if (unlikely(icsk->icsk_mtup.probe_size &&
icsk 3240 net/ipv4/tcp_input.c if (icsk->icsk_ca_ops->pkts_acked) {
icsk 3245 net/ipv4/tcp_input.c icsk->icsk_ca_ops->pkts_acked(sk, &sample);
icsk 3253 net/ipv4/tcp_input.c icsk = inet_csk(sk);
icsk 3256 net/ipv4/tcp_input.c tp->lost_out, icsk->icsk_ca_state);
icsk 3261 net/ipv4/tcp_input.c tp->sacked_out, icsk->icsk_ca_state);
icsk 3266 net/ipv4/tcp_input.c tp->retrans_out, icsk->icsk_ca_state);
icsk 3276 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3284 net/ipv4/tcp_input.c icsk->icsk_backoff = 0;
icsk 3326 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3328 net/ipv4/tcp_input.c if (icsk->icsk_ca_ops->cong_control) {
icsk 3329 net/ipv4/tcp_input.c icsk->icsk_ca_ops->cong_control(sk, rs);
icsk 3541 net/ipv4/tcp_input.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3543 net/ipv4/tcp_input.c if (icsk->icsk_ca_ops->in_ack_event)
icsk 3544 net/ipv4/tcp_input.c icsk->icsk_ca_ops->in_ack_event(sk, flags);
icsk 3587 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3629 net/ipv4/tcp_input.c icsk->icsk_retransmits = 0;
icsk 3633 net/ipv4/tcp_input.c if (icsk->icsk_clean_acked)
icsk 3634 net/ipv4/tcp_input.c icsk->icsk_clean_acked(sk, ack);
icsk 3689 net/ipv4/tcp_input.c icsk->icsk_probes_out = 0;
icsk 5730 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 5734 net/ipv4/tcp_input.c icsk->icsk_af_ops->rebuild_header(sk);
icsk 5757 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 5760 net/ipv4/tcp_input.c icsk->icsk_ack.lrcvtime = tcp_jiffies32;
icsk 5763 net/ipv4/tcp_input.c icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
icsk 5873 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 5966 net/ipv4/tcp_input.c tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 5990 net/ipv4/tcp_input.c icsk->icsk_accept_queue.rskq_defer_accept ||
icsk 6060 net/ipv4/tcp_input.c tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 6138 net/ipv4/tcp_input.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 6163 net/ipv4/tcp_input.c acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
icsk 428 net/ipv4/tcp_ipv4.c struct inet_connection_sock *icsk;
icsk 481 net/ipv4/tcp_ipv4.c icsk = inet_csk(sk);
icsk 530 net/ipv4/tcp_ipv4.c if (seq != tp->snd_una || !icsk->icsk_retransmits ||
icsk 531 net/ipv4/tcp_ipv4.c !icsk->icsk_backoff || fastopen)
icsk 541 net/ipv4/tcp_ipv4.c icsk->icsk_backoff--;
icsk 542 net/ipv4/tcp_ipv4.c icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
icsk 544 net/ipv4/tcp_ipv4.c icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
icsk 549 net/ipv4/tcp_ipv4.c remaining = icsk->icsk_rto -
icsk 2080 net/ipv4/tcp_ipv4.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2084 net/ipv4/tcp_ipv4.c icsk->icsk_af_ops = &ipv4_specific;
icsk 2429 net/ipv4/tcp_ipv4.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2431 net/ipv4/tcp_ipv4.c const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
icsk 2439 net/ipv4/tcp_ipv4.c if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk 2440 net/ipv4/tcp_ipv4.c icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk 2441 net/ipv4/tcp_ipv4.c icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
icsk 2443 net/ipv4/tcp_ipv4.c timer_expires = icsk->icsk_timeout;
icsk 2444 net/ipv4/tcp_ipv4.c } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
icsk 2446 net/ipv4/tcp_ipv4.c timer_expires = icsk->icsk_timeout;
icsk 2472 net/ipv4/tcp_ipv4.c icsk->icsk_retransmits,
icsk 2474 net/ipv4/tcp_ipv4.c icsk->icsk_probes_out,
icsk 2477 net/ipv4/tcp_ipv4.c jiffies_to_clock_t(icsk->icsk_rto),
icsk 2478 net/ipv4/tcp_ipv4.c jiffies_to_clock_t(icsk->icsk_ack.ato),
icsk 2479 net/ipv4/tcp_ipv4.c (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
icsk 322 net/ipv4/tcp_metrics.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 336 net/ipv4/tcp_metrics.c if (icsk->icsk_backoff || !tp->srtt_us) {
icsk 401 net/ipv4/tcp_metrics.c icsk->icsk_ca_state == TCP_CA_Open) {
icsk 255 net/ipv4/tcp_minisocks.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 264 net/ipv4/tcp_minisocks.c const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
icsk 408 net/ipv4/tcp_minisocks.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 418 net/ipv4/tcp_minisocks.c icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
icsk 419 net/ipv4/tcp_minisocks.c icsk->icsk_ca_ops = ca;
icsk 427 net/ipv4/tcp_minisocks.c (!icsk->icsk_ca_setsockopt ||
icsk 428 net/ipv4/tcp_minisocks.c !try_module_get(icsk->icsk_ca_ops->owner))
icsk 242 net/ipv4/tcp_nv.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 256 net/ipv4/tcp_nv.c if (icsk->icsk_ca_state != TCP_CA_Open &&
icsk 257 net/ipv4/tcp_nv.c icsk->icsk_ca_state != TCP_CA_Disorder)
icsk 66 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 79 net/ipv4/tcp_output.c if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
icsk 162 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 173 net/ipv4/tcp_output.c if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
icsk 174 net/ipv4/tcp_output.c (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
icsk 1020 net/ipv4/tcp_output.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1149 net/ipv4/tcp_output.c icsk->icsk_af_ops->send_check(sk, skb);
icsk 1177 net/ipv4/tcp_output.c err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
icsk 1470 net/ipv4/tcp_output.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1476 net/ipv4/tcp_output.c mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
icsk 1479 net/ipv4/tcp_output.c if (icsk->icsk_af_ops->net_frag_header_len) {
icsk 1483 net/ipv4/tcp_output.c mss_now -= icsk->icsk_af_ops->net_frag_header_len;
icsk 1491 net/ipv4/tcp_output.c mss_now -= icsk->icsk_ext_hdr_len;
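inet_csk_rto_backoff() (include/net/inet_connection_sock.h lines 245-248 near the top of this index) computes the backed-off timeout as icsk_rto << icsk_backoff, and net/ipv4/tcp_ipv4.c line 544 uses it to rebuild the RTO after unwinding one backoff step on an ICMP error. A small reimplementation, under the assumption that the shifted value is clamped to max_when:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rto_backoff(uint64_t rto, unsigned int backoff,
                                uint64_t max_when)
    {
        uint64_t when = rto << backoff;           /* exponential backoff */
        return when > max_when ? max_when : when; /* clamp (assumed) */
    }

    int main(void)
    {
        /* base RTO 200 jiffies, 3 backoff steps, capped at 120000 jiffies */
        printf("%llu\n", (unsigned long long)rto_backoff(200, 3, 120000));
        return 0;
    }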
icsk 1510 net/ipv4/tcp_output.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1515 net/ipv4/tcp_output.c icsk->icsk_ext_hdr_len +
icsk 1516 net/ipv4/tcp_output.c icsk->icsk_af_ops->net_header_len;
icsk 1519 net/ipv4/tcp_output.c if (icsk->icsk_af_ops->net_frag_header_len) {
icsk 1523 net/ipv4/tcp_output.c mtu += icsk->icsk_af_ops->net_frag_header_len;
icsk 1533 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1536 net/ipv4/tcp_output.c icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
icsk 1537 net/ipv4/tcp_output.c icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
icsk 1538 net/ipv4/tcp_output.c icsk->icsk_af_ops->net_header_len;
icsk 1539 net/ipv4/tcp_output.c icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk 1540 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_size = 0;
icsk 1541 net/ipv4/tcp_output.c if (icsk->icsk_mtup.enabled)
icsk 1542 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
icsk 1571 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1574 net/ipv4/tcp_output.c if (icsk->icsk_mtup.search_high > pmtu)
icsk 1575 net/ipv4/tcp_output.c icsk->icsk_mtup.search_high = pmtu;
icsk 1581 net/ipv4/tcp_output.c icsk->icsk_pmtu_cookie = pmtu;
icsk 1582 net/ipv4/tcp_output.c if (icsk->icsk_mtup.enabled)
icsk 1583 net/ipv4/tcp_output.c mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
icsk 1944 net/ipv4/tcp_output.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1951 net/ipv4/tcp_output.c if (icsk->icsk_ca_state >= TCP_CA_Recovery)
icsk 2043 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2050 net/ipv4/tcp_output.c delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
icsk 2055 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_size = 0;
icsk 2056 net/ipv4/tcp_output.c icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
icsk 2058 net/ipv4/tcp_output.c icsk->icsk_af_ops->net_header_len;
icsk 2059 net/ipv4/tcp_output.c icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
icsk 2062 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
icsk 2095 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2110 net/ipv4/tcp_output.c if (likely(!icsk->icsk_mtup.enabled ||
icsk 2111 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_size ||
icsk 2122 net/ipv4/tcp_output.c probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
icsk 2123 net/ipv4/tcp_output.c icsk->icsk_mtup.search_low) >> 1);
icsk 2125 net/ipv4/tcp_output.c interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
icsk 2130 net/ipv4/tcp_output.c if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
icsk 2223 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
icsk 2493 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2510 net/ipv4/tcp_output.c (icsk->icsk_ca_state != TCP_CA_Open &&
icsk 2511 net/ipv4/tcp_output.c icsk->icsk_ca_state != TCP_CA_CWR))
icsk 2703 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2711 net/ipv4/tcp_output.c int mss = icsk->icsk_ack.rcv_mss;
icsk 2723 net/ipv4/tcp_output.c icsk->icsk_ack.quick = 0;
icsk 2899 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 2906 net/ipv4/tcp_output.c if (icsk->icsk_mtup.probe_size)
icsk 2907 net/ipv4/tcp_output.c icsk->icsk_mtup.probe_size = 0;
icsk 3048 net/ipv4/tcp_output.c const struct inet_connection_sock *icsk = inet_csk(sk);
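The net/ipv4/tcp_output.c probing entries above (lines 2095-2130) choose the next path-MTU probe as the midpoint of the current search window, while the net/ipv4/tcp_input.c entries at lines 2570 and 2589 shrink that window on failure or success, so probing is a binary search over the MTU range. A simplified model of the midpoint step (the kernel converts through tcp_mtu_to_mss(); next_probe_mtu is a stand-in name):

    #include <stdio.h>

    static int next_probe_mtu(int search_low, int search_high)
    {
        return (search_low + search_high) >> 1;  /* midpoint of the window */
    }

    int main(void)
    {
        int low = 1024, high = 1500;
        /* a failed probe lowers high (search_high = probe_size - 1), a
           successful one raises low (search_low = probe_size), halving
           the window each round */
        printf("probe MTU %d\n", next_probe_mtu(low, high));
        return 0;
    }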
icsk 3088 net/ipv4/tcp_output.c if (icsk->icsk_ca_state != TCP_CA_Loss)
icsk 3109 net/ipv4/tcp_output.c icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
icsk 3370 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3380 net/ipv4/tcp_output.c module_put(icsk->icsk_ca_ops->owner);
icsk 3381 net/ipv4/tcp_output.c icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
icsk 3382 net/ipv4/tcp_output.c icsk->icsk_ca_ops = ca;
icsk 3632 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3633 net/ipv4/tcp_output.c int ato = icsk->icsk_ack.ato;
icsk 3641 net/ipv4/tcp_output.c (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
icsk 3665 net/ipv4/tcp_output.c if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
icsk 3669 net/ipv4/tcp_output.c if (icsk->icsk_ack.blocked ||
icsk 3670 net/ipv4/tcp_output.c time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
icsk 3675 net/ipv4/tcp_output.c if (!time_before(timeout, icsk->icsk_ack.timeout))
icsk 3676 net/ipv4/tcp_output.c timeout = icsk->icsk_ack.timeout;
icsk 3678 net/ipv4/tcp_output.c icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
icsk 3679 net/ipv4/tcp_output.c icsk->icsk_ack.timeout = timeout;
icsk 3680 net/ipv4/tcp_output.c sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
icsk 3818 net/ipv4/tcp_output.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 3828 net/ipv4/tcp_output.c icsk->icsk_probes_out = 0;
icsk 3829 net/ipv4/tcp_output.c icsk->icsk_backoff = 0;
icsk 3833 net/ipv4/tcp_output.c icsk->icsk_probes_out++;
icsk 3835 net/ipv4/tcp_output.c if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
icsk 3836 net/ipv4/tcp_output.c icsk->icsk_backoff++;
icsk 28 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 33 net/ipv4/tcp_timer.c if (!icsk->icsk_user_timeout)
icsk 34 net/ipv4/tcp_timer.c return icsk->icsk_rto;
icsk 36 net/ipv4/tcp_timer.c remaining = icsk->icsk_user_timeout - elapsed;
icsk 40 net/ipv4/tcp_timer.c return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
icsk 142 net/ipv4/tcp_timer.c static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
icsk 151 net/ipv4/tcp_timer.c if (!icsk->icsk_mtup.enabled) {
icsk 152 net/ipv4/tcp_timer.c icsk->icsk_mtup.enabled = 1;
icsk 153 net/ipv4/tcp_timer.c icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
icsk 155 net/ipv4/tcp_timer.c mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
icsk 159 net/ipv4/tcp_timer.c icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
icsk 161 net/ipv4/tcp_timer.c tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 215 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 222 net/ipv4/tcp_timer.c if (icsk->icsk_retransmits) {
icsk 227 net/ipv4/tcp_timer.c retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
icsk 228 net/ipv4/tcp_timer.c expired = icsk->icsk_retransmits >= retry_until;
icsk 232 net/ipv4/tcp_timer.c tcp_mtu_probing(icsk, sk);
icsk 241 net/ipv4/tcp_timer.c const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
icsk 253 net/ipv4/tcp_timer.c icsk->icsk_user_timeout);
icsk 258 net/ipv4/tcp_timer.c icsk->icsk_retransmits,
icsk 259 net/ipv4/tcp_timer.c icsk->icsk_rto, (int)expired);
icsk 273 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 278 net/ipv4/tcp_timer.c !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
icsk 281 net/ipv4/tcp_timer.c if (time_after(icsk->icsk_ack.timeout, jiffies)) {
icsk 282 net/ipv4/tcp_timer.c sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
icsk 285 net/ipv4/tcp_timer.c icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
icsk 290 net/ipv4/tcp_timer.c icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
icsk 296 net/ipv4/tcp_timer.c icsk->icsk_ack.ato = TCP_ATO_MIN;
icsk 320 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk =
icsk 321 net/ipv4/tcp_timer.c from_timer(icsk, t, icsk_delack_timer);
icsk 322 net/ipv4/tcp_timer.c struct sock *sk = &icsk->icsk_inet.sk;
icsk 328 net/ipv4/tcp_timer.c icsk->icsk_ack.blocked = 1;
icsk 340 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 346 net/ipv4/tcp_timer.c icsk->icsk_probes_out = 0;
icsk 358 net/ipv4/tcp_timer.c if (icsk->icsk_user_timeout) {
icsk 359 net/ipv4/tcp_timer.c u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
icsk 362 net/ipv4/tcp_timer.c if (elapsed >= icsk->icsk_user_timeout)
icsk 368 net/ipv4/tcp_timer.c const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
icsk 371 net/ipv4/tcp_timer.c if (!alive && icsk->icsk_backoff >= max_probes)
icsk 377 net/ipv4/tcp_timer.c if (icsk->icsk_probes_out >= max_probes) {
icsk 391 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 392 net/ipv4/tcp_timer.c int max_retries = icsk->icsk_syn_retries ? :
icsk 403 net/ipv4/tcp_timer.c if (icsk->icsk_retransmits == 1)
icsk 412 net/ipv4/tcp_timer.c icsk->icsk_retransmits++;
icsk 435 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 492 net/ipv4/tcp_timer.c if (icsk->icsk_retransmits == 0) {
icsk 495 net/ipv4/tcp_timer.c if (icsk->icsk_ca_state == TCP_CA_Recovery) {
icsk 500 net/ipv4/tcp_timer.c } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
icsk 502 net/ipv4/tcp_timer.c } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
icsk 515 net/ipv4/tcp_timer.c icsk->icsk_retransmits++;
icsk 541 net/ipv4/tcp_timer.c icsk->icsk_backoff++;
icsk 556 net/ipv4/tcp_timer.c icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
icsk 557 net/ipv4/tcp_timer.c icsk->icsk_backoff = 0;
icsk 558 net/ipv4/tcp_timer.c icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
icsk 561 net/ipv4/tcp_timer.c icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
icsk 575 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 579 net/ipv4/tcp_timer.c !icsk->icsk_pending)
icsk 582 net/ipv4/tcp_timer.c if (time_after(icsk->icsk_timeout, jiffies)) {
icsk 583 net/ipv4/tcp_timer.c sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
icsk 588 net/ipv4/tcp_timer.c event = icsk->icsk_pending;
icsk 598 net/ipv4/tcp_timer.c icsk->icsk_pending = 0;
icsk 602 net/ipv4/tcp_timer.c icsk->icsk_pending = 0;
icsk 613 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk =
icsk 614 net/ipv4/tcp_timer.c from_timer(icsk, t, icsk_retransmit_timer);
icsk 615 net/ipv4/tcp_timer.c struct sock *sk = &icsk->icsk_inet.sk;
icsk 653 net/ipv4/tcp_timer.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 700 net/ipv4/tcp_timer.c if ((icsk->icsk_user_timeout != 0 &&
icsk 701 net/ipv4/tcp_timer.c elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
icsk 702 net/ipv4/tcp_timer.c icsk->icsk_probes_out > 0) ||
icsk 703 net/ipv4/tcp_timer.c (icsk->icsk_user_timeout == 0 &&
icsk 704 net/ipv4/tcp_timer.c icsk->icsk_probes_out >= keepalive_probes(tp))) {
icsk 710 net/ipv4/tcp_timer.c icsk->icsk_probes_out++;
icsk 102 net/ipv4/tcp_ulp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 104 net/ipv4/tcp_ulp.c if (!icsk->icsk_ulp_ops) {
icsk 110 net/ipv4/tcp_ulp.c if (icsk->icsk_ulp_ops->update)
icsk 111 net/ipv4/tcp_ulp.c icsk->icsk_ulp_ops->update(sk, proto, write_space);
icsk 116 net/ipv4/tcp_ulp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 122 net/ipv4/tcp_ulp.c if (!icsk->icsk_ulp_ops)
icsk 125 net/ipv4/tcp_ulp.c if (icsk->icsk_ulp_ops->release)
icsk 126 net/ipv4/tcp_ulp.c icsk->icsk_ulp_ops->release(sk);
icsk 127 net/ipv4/tcp_ulp.c module_put(icsk->icsk_ulp_ops->owner);
icsk 129 net/ipv4/tcp_ulp.c icsk->icsk_ulp_ops = NULL;
icsk 134 net/ipv4/tcp_ulp.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 138 net/ipv4/tcp_ulp.c if (icsk->icsk_ulp_ops)
icsk 145 net/ipv4/tcp_ulp.c icsk->icsk_ulp_ops = ulp_ops;
icsk 63 net/ipv4/tcp_yeah.c const struct inet_connection_sock *icsk = inet_csk(sk);
icsk 66 net/ipv4/tcp_yeah.c if (icsk->icsk_ca_state == TCP_CA_Open)
icsk 123 net/ipv6/inet6_hashtables.c struct inet_connection_sock *icsk;
icsk 128 net/ipv6/inet6_hashtables.c inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
icsk 129 net/ipv6/inet6_hashtables.c sk = (struct sock *)icsk;
icsk 107 net/ipv6/ipv6_sockglue.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 108 net/ipv6/ipv6_sockglue.c icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
icsk 109 net/ipv6/ipv6_sockglue.c icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
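The net/ipv4/tcp_ulp.c entries above (lines 102-145) show the upper-layer-protocol hook pattern: icsk_ulp_ops is attached once (a second attach is refused), every optional callback is NULL-checked before the indirect call, and release drops the module reference and clears the pointer. A simplified userspace sketch of that shape (struct ulp_ops and struct icsk_model are stand-ins; module refcounting is omitted):

    #include <stdio.h>

    struct ulp_ops {
        const char *name;
        void (*release)(void);  /* optional hook; may be NULL */
    };

    struct icsk_model {
        const struct ulp_ops *ulp_ops;  /* like icsk->icsk_ulp_ops */
    };

    static void tls_release(void) { puts("released"); }

    static const struct ulp_ops tls_ulp = { "tls", tls_release };

    static int ulp_attach(struct icsk_model *icsk, const struct ulp_ops *ops)
    {
        if (icsk->ulp_ops)       /* already attached: refuse, as at line 138 */
            return -1;
        icsk->ulp_ops = ops;     /* attach, as at line 145 */
        return 0;
    }

    static void ulp_release(struct icsk_model *icsk)
    {
        if (!icsk->ulp_ops)
            return;
        if (icsk->ulp_ops->release)  /* hooks are optional: check first */
            icsk->ulp_ops->release();
        icsk->ulp_ops = NULL;        /* clear, as at line 129 */
    }

    int main(void)
    {
        struct icsk_model icsk = { 0 };
        ulp_attach(&icsk, &tls_ulp);
        ulp_release(&icsk);
        return 0;
    }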
icsk 217 net/ipv6/ipv6_sockglue.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 223 net/ipv6/ipv6_sockglue.c icsk->icsk_af_ops = &ipv4_specific;
icsk 226 net/ipv6/ipv6_sockglue.c tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
icsk 149 net/ipv6/tcp_ipv6.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 229 net/ipv6/tcp_ipv6.c u32 exthdrlen = icsk->icsk_ext_hdr_len;
icsk 239 net/ipv6/tcp_ipv6.c icsk->icsk_af_ops = &ipv6_mapped;
icsk 248 net/ipv6/tcp_ipv6.c icsk->icsk_ext_hdr_len = exthdrlen;
icsk 249 net/ipv6/tcp_ipv6.c icsk->icsk_af_ops = &ipv6_specific;
icsk 296 net/ipv6/tcp_ipv6.c icsk->icsk_ext_hdr_len = 0;
icsk 298 net/ipv6/tcp_ipv6.c icsk->icsk_ext_hdr_len = opt->opt_flen +
icsk 1805 net/ipv6/tcp_ipv6.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 1809 net/ipv6/tcp_ipv6.c icsk->icsk_af_ops = &ipv6_specific;
icsk 1866 net/ipv6/tcp_ipv6.c const struct inet_connection_sock *icsk = inet_csk(sp);
icsk 1867 net/ipv6/tcp_ipv6.c const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
icsk 1876 net/ipv6/tcp_ipv6.c if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk 1877 net/ipv6/tcp_ipv6.c icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk 1878 net/ipv6/tcp_ipv6.c icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
icsk 1880 net/ipv6/tcp_ipv6.c timer_expires = icsk->icsk_timeout;
icsk 1881 net/ipv6/tcp_ipv6.c } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
icsk 1883 net/ipv6/tcp_ipv6.c timer_expires = icsk->icsk_timeout;
icsk 1915 net/ipv6/tcp_ipv6.c icsk->icsk_retransmits,
icsk 1917 net/ipv6/tcp_ipv6.c icsk->icsk_probes_out,
icsk 1920 net/ipv6/tcp_ipv6.c jiffies_to_clock_t(icsk->icsk_rto),
icsk 1921 net/ipv6/tcp_ipv6.c jiffies_to_clock_t(icsk->icsk_ack.ato),
icsk 1922 net/ipv6/tcp_ipv6.c (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
icsk 293 net/tls/tls_main.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 309 net/tls/tls_main.c rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
icsk 600 net/tls/tls_main.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 608 net/tls/tls_main.c rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
icsk 642 net/tls/tls_main.c struct inet_connection_sock *icsk = inet_csk(sk);
icsk 646 net/tls/tls_main.c rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
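The net/tls/tls_main.c entries (lines 293-646) publish and clear icsk_ulp_data with rcu_assign_pointer(), a release-style store guaranteeing that a reader who observes the new pointer also observes the fully initialized context behind it. Real RCU additionally needs read-side critical sections and grace periods; as a rough single-writer analogue only, a C11 release store looks like this (struct tls_ctx and publish_ctx are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    struct tls_ctx { int ready; };

    static _Atomic(struct tls_ctx *) ulp_data;  /* like icsk->icsk_ulp_data */

    static void publish_ctx(struct tls_ctx *ctx)
    {
        ctx->ready = 1;  /* finish initialization before publication */
        atomic_store_explicit(&ulp_data, ctx, memory_order_release);
    }

    int main(void)
    {
        static struct tls_ctx ctx;
        publish_ctx(&ctx);
        printf("published: %d\n", atomic_load(&ulp_data)->ready);
        return 0;
    }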