Lines matching refs:icsk in net/ipv4/tcp_input.c (the leading number on each line is the source line in that file; "local" marks lines where icsk is declared as a local variable)
134 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss() local
135 const unsigned int lss = icsk->icsk_ack.last_seg_size; in tcp_measure_rcv_mss()
138 icsk->icsk_ack.last_seg_size = 0; in tcp_measure_rcv_mss()
144 if (len >= icsk->icsk_ack.rcv_mss) { in tcp_measure_rcv_mss()
145 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
166 icsk->icsk_ack.last_seg_size = len; in tcp_measure_rcv_mss()
168 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
172 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) in tcp_measure_rcv_mss()
173 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; in tcp_measure_rcv_mss()
174 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; in tcp_measure_rcv_mss()
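
Taken together, the tcp_measure_rcv_mss() hits above are the receiver-side MSS estimator: a full-sized arriving segment raises rcv_mss directly (lines 144-145), while an undersized one is remembered in last_seg_size and escalates the pending-ACK flags (lines 166-174). A minimal user-space model of that flow; field names mirror the kernel's, but the struct and driver are illustrative, not kernel code:

    #include <stdio.h>

    #define ICSK_ACK_PUSHED  4   /* flag values as in inet_connection_sock.h */
    #define ICSK_ACK_PUSHED2 8   /* (SCHED=1 and TIMER=2 omitted here)       */

    struct ack_state {
            unsigned int rcv_mss;       /* current estimate of the peer's MSS */
            unsigned int last_seg_size; /* most recent undersized segment */
            unsigned char pending;      /* ACK-scheduling flag bits */
    };

    static void measure_rcv_mss(struct ack_state *ack, unsigned int len)
    {
            ack->last_seg_size = 0;
            if (len >= ack->rcv_mss) {
                    /* Full-sized segment: raise the estimate directly. */
                    ack->rcv_mss = len;
            } else {
                    /* Undersized segment: remember it and escalate the
                     * "ACK was pushed" hints, as lines 166-174 do. */
                    ack->last_seg_size = len;
                    if (ack->pending & ICSK_ACK_PUSHED)
                            ack->pending |= ICSK_ACK_PUSHED2;
                    ack->pending |= ICSK_ACK_PUSHED;
            }
    }

    int main(void)
    {
            struct ack_state ack = { .rcv_mss = 536 };
            const unsigned int segs[] = { 536, 1448, 512, 512 };

            for (unsigned int i = 0; i < 4; i++) {
                    measure_rcv_mss(&ack, segs[i]);
                    printf("len=%u rcv_mss=%u pending=%u\n",
                           segs[i], ack.rcv_mss, (unsigned int)ack.pending);
            }
            return 0;
    }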
180 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack() local
181 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
185 if (quickacks > icsk->icsk_ack.quick) in tcp_incr_quickack()
186 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); in tcp_incr_quickack()
191 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode() local
193 icsk->icsk_ack.pingpong = 0; in tcp_enter_quickack_mode()
194 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_enter_quickack_mode()
203 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode() local
207 (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); in tcp_in_quickack_mode()
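
The three quickack functions above form one mechanism: tcp_incr_quickack() grants a budget of ACKs worth roughly half the receive window in rcv_mss units, tcp_enter_quickack_mode() clears pingpong and drops ato to its floor, and tcp_in_quickack_mode() reports whether budget remains outside interactive (pingpong) mode. A sketch of the budget arithmetic; the zero-guard comes from the elided lines 183-184, and TCP_MAX_QUICKACKS is taken to be 16U as in this era's net/tcp.h:

    #define TCP_MAX_QUICKACKS 16U   /* assumed value, from this era's net/tcp.h */

    static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
    {
            unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

            if (quickacks == 0)     /* guard from the elided lines 183-184 */
                    quickacks = 2;
            return quickacks > TCP_MAX_QUICKACKS ? TCP_MAX_QUICKACKS : quickacks;
    }

For example, with a 64 KiB window and a 1448-byte MSS the division gives 65536 / 2896 = 22, so the cap of 16 wins.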
447 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window() local
449 icsk->icsk_ack.quick = 0; in tcp_clamp_window()
635 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv() local
646 if (!icsk->icsk_ack.ato) { in tcp_event_data_recv()
651 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_event_data_recv()
653 int m = now - icsk->icsk_ack.lrcvtime; in tcp_event_data_recv()
657 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; in tcp_event_data_recv()
658 } else if (m < icsk->icsk_ack.ato) { in tcp_event_data_recv()
659 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; in tcp_event_data_recv()
660 if (icsk->icsk_ack.ato > icsk->icsk_rto) in tcp_event_data_recv()
661 icsk->icsk_ack.ato = icsk->icsk_rto; in tcp_event_data_recv()
662 } else if (m > icsk->icsk_rto) { in tcp_event_data_recv()
670 icsk->icsk_ack.lrcvtime = now; in tcp_event_data_recv()
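
Lines 646-670 are the delayed-ACK timeout (ato) estimator: the gap m since the last receive drives a rough EWMA, pulled toward TCP_ATO_MIN when the peer streams segments back-to-back and clamped at icsk_rto from above. A user-space model, assuming ato, m and rto share one time unit (jiffies in the kernel, where TCP_ATO_MIN is HZ/25):

    #define TCP_ATO_MIN 40  /* assumption: HZ/25 with HZ=1000, i.e. 40 ms */

    static unsigned int update_ato(unsigned int ato, int m, unsigned int rto)
    {
            if (ato == 0)
                    return TCP_ATO_MIN;     /* first data packet: start at the floor */
            if (m <= TCP_ATO_MIN / 2) {
                    /* Back-to-back arrivals: decay toward the minimum. */
                    ato = (ato >> 1) + TCP_ATO_MIN / 2;
            } else if (m < (int)ato) {
                    ato = (ato >> 1) + (unsigned int)m;
                    if (ato > rto)
                            ato = rto;
            }
            /* The kernel's remaining branch (m > rto, line 662) leaves ato
             * alone and instead re-arms quickacks, since the sender has
             * apparently failed to restart its window. */
            return ato;
    }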
1874 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss() local
1877 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; in tcp_enter_loss()
1881 if (icsk->icsk_ca_state <= TCP_CA_Disorder || in tcp_enter_loss()
1883 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { in tcp_enter_loss()
1885 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1925 if (icsk->icsk_ca_state <= TCP_CA_Disorder && in tcp_enter_loss()
1938 (new_recovery || icsk->icsk_retransmits) && in tcp_enter_loss()
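
tcp_enter_loss() leans on the numeric ordering of the congestion states: icsk_ca_state < TCP_CA_Recovery at line 1877 means neither recovery nor loss has begun, and <= TCP_CA_Disorder at lines 1881 and 1925 means no cwnd reduction has consumed ssthresh yet; tcp_fastretrans_alert() below uses the same trick. The ordering as declared in the kernel's tcp.h:

    enum tcp_ca_state {
            TCP_CA_Open     = 0,    /* nothing bad seen */
            TCP_CA_Disorder = 1,    /* reordering/SACK hints, no reduction yet */
            TCP_CA_CWR      = 2,    /* cwnd reduction after a local/ECN signal */
            TCP_CA_Recovery = 3,    /* fast retransmit in progress */
            TCP_CA_Loss     = 4,    /* RTO-driven loss recovery */
    };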
2358 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction() local
2360 if (icsk->icsk_ca_ops->undo_cwnd) in tcp_undo_cwnd_reduction()
2361 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
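
tcp_undo_cwnd_reduction() shows the congestion-ops dispatch pattern that recurs in tcp_cong_avoid(), tcp_clean_rtx_queue() and tcp_in_ack_event() below: required hooks such as ssthresh and cong_avoid are called unconditionally, while optional ones (undo_cwnd here, pkts_acked, in_ack_event) are NULL-checked at each call site. A minimal sketch of that shape; the struct is pared down from tcp_congestion_ops, not the full kernel definition:

    struct sock;    /* opaque here; only pointers are passed around */

    struct ca_ops {
            /* required hooks, called unconditionally */
            unsigned int (*ssthresh)(struct sock *sk);
            void (*cong_avoid)(struct sock *sk, unsigned int ack,
                               unsigned int acked);
            /* optional hooks, NULL-checked at every call site */
            unsigned int (*undo_cwnd)(struct sock *sk);
            void (*in_ack_event)(struct sock *sk, unsigned int flags);
    };

    static unsigned int undo_cwnd_or_default(const struct ca_ops *ops,
                                             struct sock *sk,
                                             unsigned int cwnd,
                                             unsigned int ssthresh)
    {
            if (ops->undo_cwnd)     /* optional hook */
                    return ops->undo_cwnd(sk);
            /* fallback mirrors the kernel's max(snd_cwnd, snd_ssthresh << 1) */
            return cwnd > (ssthresh << 1) ? cwnd : (ssthresh << 1);
    }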
2563 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed() local
2565 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; in tcp_mtup_probe_failed()
2566 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_failed()
2573 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success() local
2579 icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2584 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2585 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_success()
2586 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
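
The two MTU-probing functions narrow a binary-search window over probe sizes: a failed probe lowers search_high to just below the probe (line 2565), while a successful one raises search_low to the probe size and rescales cwnd by the old-to-new MTU ratio before resyncing the MSS (lines 2579-2586). A sketch of the window bookkeeping only; a user-space model, not the kernel code:

    struct mtu_probe_state {
            int search_low;         /* highest MTU known to work */
            int search_high;        /* lowest MTU suspected not to */
            int probe_size;         /* in-flight probe, 0 when idle */
    };

    static void mtu_probe_failed(struct mtu_probe_state *m)
    {
            m->search_high = m->probe_size - 1;     /* shrink from above */
            m->probe_size = 0;
    }

    static void mtu_probe_success(struct mtu_probe_state *m)
    {
            m->search_low = m->probe_size;          /* grow from below */
            m->probe_size = 0;
    }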
2596 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit() local
2630 if (icsk->icsk_ca_state != TCP_CA_Loss) { in tcp_simple_retransmit()
2763 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert() local
2788 if (icsk->icsk_ca_state == TCP_CA_Open) { in tcp_fastretrans_alert()
2792 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2818 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2837 if (icsk->icsk_ca_state != TCP_CA_Open && in tcp_fastretrans_alert()
2849 if (icsk->icsk_ca_state <= TCP_CA_Disorder) in tcp_fastretrans_alert()
2858 if (icsk->icsk_ca_state < TCP_CA_CWR && in tcp_fastretrans_alert()
2859 icsk->icsk_mtup.probe_size && in tcp_fastretrans_alert()
2997 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid() local
2999 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3008 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto() local
3022 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || in tcp_rearm_rto()
3023 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { in tcp_rearm_rto()
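
The equality tests on icsk_pending in tcp_rearm_rto() (and again in tcp_ack() below) work because one retransmit timer is shared by the RTO, zero-window probe, early retransmit and tail loss probe, with icsk_pending recording which event is armed; delayed ACK has its own timer and tag. The event tags, with the values I believe this era's inet_connection_sock.h uses:

    #define ICSK_TIME_RETRANS       1   /* retransmit (RTO) timer */
    #define ICSK_TIME_DACK          2   /* delayed ACK timer */
    #define ICSK_TIME_PROBE0        3   /* zero-window probe timer */
    #define ICSK_TIME_EARLY_RETRANS 4   /* early-retransmit delay */
    #define ICSK_TIME_LOSS_PROBE    5   /* tail loss probe (TLP) */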
3101 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue() local
3212 if (unlikely(icsk->icsk_mtup.probe_size && in tcp_clean_rtx_queue()
3242 if (icsk->icsk_ca_ops->pkts_acked) in tcp_clean_rtx_queue()
3243 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us); in tcp_clean_rtx_queue()
3250 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3253 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3258 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3263 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3274 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe() local
3279 icsk->icsk_backoff = 0; in tcp_ack_probe()
3505 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event() local
3507 if (icsk->icsk_ca_ops->in_ack_event) in tcp_in_ack_event()
3508 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
3514 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack() local
3549 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || in tcp_ack()
3550 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_ack()
3555 icsk->icsk_retransmits = 0; in tcp_ack()
3607 icsk->icsk_probes_out = 0; in tcp_ack()
3636 if (icsk->icsk_pending == ICSK_TIME_RETRANS) in tcp_ack()
5426 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect() local
5431 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
5436 icsk->icsk_af_ops->rebuild_header(sk); in tcp_finish_connect()
5520 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process() local
5614 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5631 icsk->icsk_accept_queue.rskq_defer_accept || in tcp_rcv_synsent_state_process()
5632 icsk->icsk_ack.pingpong) { in tcp_rcv_synsent_state_process()
5641 icsk->icsk_ack.lrcvtime = tcp_time_stamp; in tcp_rcv_synsent_state_process()
5702 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
5748 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process() local
5770 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) in tcp_rcv_state_process()
5842 icsk->icsk_af_ops->rebuild_header(sk); in tcp_rcv_state_process()