/linux-4.4.14/net/sctp/ |
H A D | transport.c | 385 __u32 cwnd, ssthresh, flight_size, pba, pmtu; sctp_transport_raise_cwnd() local
      | 403 ssthresh = transport->ssthresh; sctp_transport_raise_cwnd()
      | 407 if (cwnd <= ssthresh) { sctp_transport_raise_cwnd()
      | 409 * o When cwnd is less than or equal to ssthresh, an SCTP sctp_transport_raise_cwnd()
      | 431 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", sctp_transport_raise_cwnd()
      | 432 __func__, transport, bytes_acked, cwnd, ssthresh, sctp_transport_raise_cwnd()
      | 435 /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, sctp_transport_raise_cwnd()
      | 456 "bytes_acked:%d, cwnd:%d, ssthresh:%d, " sctp_transport_raise_cwnd()
      | 458 transport, bytes_acked, cwnd, ssthresh, sctp_transport_raise_cwnd()
      | 479 * ssthresh = max(cwnd/2, 4*MTU) sctp_transport_lower_cwnd()
      | 483 transport->ssthresh = max(transport->cwnd/2, sctp_transport_lower_cwnd()
      | 492 /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the sctp_transport_lower_cwnd()
      | 500 * ssthresh = max(cwnd/2, 4*MTU) sctp_transport_lower_cwnd()
      | 501 * cwnd = ssthresh sctp_transport_lower_cwnd()
      | 511 transport->ssthresh = max(transport->cwnd/2, sctp_transport_lower_cwnd()
      | 513 transport->cwnd = transport->ssthresh; sctp_transport_lower_cwnd()
      | 524 * slow start threshold "ssthresh". sctp_transport_lower_cwnd()
      | 531 transport->ssthresh = max(transport->cwnd/2, sctp_transport_lower_cwnd()
      | 533 transport->cwnd = transport->ssthresh; sctp_transport_lower_cwnd()
      | 554 pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", sctp_transport_lower_cwnd()
      | 556 transport->ssthresh); sctp_transport_lower_cwnd()
      | 615 * All the congestion control parameters (e.g., cwnd, ssthresh) sctp_transport_reset()
      | 621 t->ssthresh = asoc->peer.i.a_rwnd; sctp_transport_reset()
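The sctp_transport_raise_cwnd()/sctp_transport_lower_cwnd() hits above quote the RFC 2960 rules directly: grow cwnd in slow start while cwnd <= ssthresh, count partial bytes acked in congestion avoidance, and on a loss-type event set ssthresh = max(cwnd/2, 4*MTU) and cwnd = ssthresh. The following is a standalone userspace sketch of those two rules only; the struct and function names are invented here, and the raise path is simplified relative to the kernel's flight-size and window-full checks.

#include <stdint.h>
#include <stdio.h>

struct toy_transport {
	uint32_t cwnd;		/* congestion window, bytes */
	uint32_t ssthresh;	/* slow start threshold, bytes */
	uint32_t pmtu;		/* path MTU, bytes */
	uint32_t pba;		/* partial bytes acked (congestion avoidance) */
};

static uint32_t u32_max(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t u32_min(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Loss-type event (e.g. fast retransmit):
 * ssthresh = max(cwnd/2, 4*MTU); cwnd = ssthresh. */
static void toy_lower_cwnd(struct toy_transport *t)
{
	t->ssthresh = u32_max(t->cwnd / 2, 4 * t->pmtu);
	t->cwnd = t->ssthresh;
}

/* SACK of new data: slow start at or below ssthresh, otherwise grow by
 * one PMTU per cwnd worth of acked data via partial bytes acked. */
static void toy_raise_cwnd(struct toy_transport *t, uint32_t bytes_acked)
{
	if (t->cwnd <= t->ssthresh) {
		t->cwnd += u32_min(bytes_acked, t->pmtu);
	} else {
		t->pba += bytes_acked;
		if (t->pba >= t->cwnd) {
			t->pba = 0;
			t->cwnd += t->pmtu;
		}
	}
}

int main(void)
{
	struct toy_transport t = { .cwnd = 4380, .ssthresh = 65535, .pmtu = 1500 };

	toy_raise_cwnd(&t, 1500);	/* slow start: cwnd 4380 -> 5880 */
	toy_lower_cwnd(&t);		/* loss: ssthresh = max(2940, 6000) = 6000 */
	printf("cwnd=%u ssthresh=%u\n", t.cwnd, t.ssthresh);
	return 0;
}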
|
H A D | probe.c | 168 &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh, jsctp_sf_eat_sack()
|
H A D | outqueue.c | 456 pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, " sctp_retransmit_mark()
      | 458 transport->cwnd, transport->ssthresh, transport->flight_size, sctp_retransmit_mark()
      | 1630 pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
      | 1632 transport->cwnd, transport->ssthresh,
|
H A D | associola.c | 676 * o The initial value of ssthresh MAY be arbitrarily high sctp_assoc_add_peer()
      | 683 * so initialize ssthresh to the default value and it will be set sctp_assoc_add_peer()
      | 686 peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; sctp_assoc_add_peer()
|
H A D | sm_sideeffect.c | 107 * so we should take action by reducing cwnd and ssthresh sctp_do_ecn_ecne_work()
|
H A D | sm_make_chunk.c | 2411 /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
      | 2417 transport->ssthresh = asoc->peer.i.a_rwnd;
|
H A D | sm_statefuns.c | 5443 * expires, adjust its ssthresh with rules defined in Section sctp_sf_do_6_3_3_rtx()
|
H A D | socket.c | 592 trans->ssthresh = asoc->peer.i.a_rwnd; sctp_send_asconf_add_ip()
|
/linux-4.4.14/net/ipv4/ |
H A D | tcp_scalable.c | 40 .ssthresh = tcp_scalable_ssthresh,
|
H A D | tcp_hybla.c | 159 /* clamp down slowstart cwnd to ssthresh value. */ hybla_cong_avoid()
      | 168 .ssthresh = tcp_reno_ssthresh,
|
H A D | tcp_cong.c | 71 /* all algorithms must implement ssthresh and cong_avoid ops */ tcp_register_congestion_control()
      | 72 if (!ca->ssthresh || !ca->cong_avoid) { tcp_register_congestion_control()
      | 374 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
      | 448 .ssthresh = tcp_reno_ssthresh,
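tcp_cong.c lines 71-72 show the contract behind every .ssthresh initializer in this directory: tcp_register_congestion_control() refuses a tcp_congestion_ops that leaves .ssthresh or .cong_avoid unset. Below is a minimal, hypothetical module sketch (module and symbol names cc_sketch* are invented) that passes the check by supplying a Reno-style halving ssthresh and reusing the exported tcp_reno_cong_avoid(); it is written against the 4.4-era callback signatures shown in include/net/tcp.h, not taken from the tree itself.

#include <linux/module.h>
#include <net/tcp.h>

/* Reno-style halving, never below two segments. */
static u32 cc_sketch_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops cc_sketch __read_mostly = {
	.ssthresh	= cc_sketch_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,	/* stock slow start / cong. avoidance */
	.owner		= THIS_MODULE,
	.name		= "cc_sketch",
};

static int __init cc_sketch_register(void)
{
	/* Fails with -EINVAL if .ssthresh or .cong_avoid were left unset. */
	return tcp_register_congestion_control(&cc_sketch);
}

static void __exit cc_sketch_unregister(void)
{
	tcp_unregister_congestion_control(&cc_sketch);
}

module_init(cc_sketch_register);
module_exit(cc_sketch_unregister);
MODULE_LICENSE("GPL");

Once such a module is loaded, the algorithm becomes selectable through the net.ipv4.tcp_congestion_control sysctl or the TCP_CONGESTION socket option, the same way the in-tree modules listed here are.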
|
H A D | tcp_highspeed.c | 162 .ssthresh = hstcp_ssthresh,
|
H A D | tcp_probe.c | 72 u32 ssthresh; member in struct:tcp_log
      | 154 p->ssthresh = tcp_current_ssthresh(sk); jtcp_rcv_established()
      | 198 p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); tcpprobe_sprint()
|
H A D | tcp_cdg.c | 136 * o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh).
      | 405 .ssthresh = tcp_cdg_ssthresh,
|
H A D | tcp_westwood.c | 21 * ssthresh after packet loss. The probing phase is as the original Reno.
      | 278 .ssthresh = tcp_reno_ssthresh,
|
H A D | tcp_dctcp.c | 315 .ssthresh = dctcp_ssthresh,
      | 325 .ssthresh = tcp_reno_ssthresh,
|
H A D | tcp_bic.c | 214 .ssthresh = bictcp_recalc_ssthresh,
|
H A D | tcp_htcp.c | 291 .ssthresh = htcp_recalc_ssthresh,
|
H A D | tcp_illinois.c | 328 .ssthresh = tcp_illinois_ssthresh,
|
H A D | tcp_veno.c | 205 .ssthresh = tcp_veno_ssthresh,
|
H A D | tcp_yeah.c | 227 .ssthresh = tcp_yeah_ssthresh,
|
H A D | tcp_lp.c | 318 .ssthresh = tcp_reno_ssthresh,
|
H A D | tcp_vegas.c | 309 .ssthresh = tcp_reno_ssthresh,
|
H A D | tcp_cubic.c | 470 .ssthresh = bictcp_recalc_ssthresh,
|
H A D | tcp_metrics.c | 460 * ssthresh may be also invalid. tcp_update_metrics()
      | 516 /* ssthresh may have been reduced unnecessarily during. tcp_init_metrics()
|
H A D | tcp_input.c | 1880 /* Reduce ssthresh if it has not yet been made inside this window. */ tcp_enter_loss()
      | 1885 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_enter_loss()
      | 2452 * 1) If the packets in flight is larger than ssthresh, PRR spreads the
      | 2456 * slow starts cwnd up to ssthresh to speed up the recovery.
      | 2468 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tcp_init_cwnd_reduction()
      | 2505 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ tcp_end_cwnd_reduction()
      | 2628 * cwnd/ssthresh really reduced now.
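The comments at tcp_input.c:2452-2456 summarize PRR: while packets in flight exceed ssthresh, the cwnd reduction is spread across incoming deliveries; once in flight drops to or below ssthresh, the window slow-starts back up to ssthresh. The sketch below models that send-quota computation in plain userspace C, following RFC 6937 in spirit rather than reproducing the kernel's exact arithmetic; all names are invented.

#include <stdint.h>
#include <stdio.h>

struct prr_state {
	uint32_t prior_cwnd;	/* cwnd when recovery started */
	uint32_t ssthresh;	/* target cwnd after the reduction */
	uint32_t prr_delivered;	/* segments delivered since recovery started */
	uint32_t prr_out;	/* segments sent since recovery started */
};

/* How many segments may be sent now, given the current in-flight count. */
static uint32_t prr_sndcnt(const struct prr_state *s, uint32_t in_flight,
			   uint32_t newly_delivered)
{
	uint32_t sndcnt;

	if (in_flight > s->ssthresh) {
		/* Proportional phase: pace sending against deliveries so
		 * cwnd walks down toward ssthresh over the recovery. */
		uint64_t budget = (uint64_t)s->ssthresh * s->prr_delivered;
		budget = (budget + s->prior_cwnd - 1) / s->prior_cwnd;
		sndcnt = budget > s->prr_out ? (uint32_t)(budget - s->prr_out) : 0;
	} else {
		/* Slow-start phase: grow back up, but never past ssthresh. */
		uint32_t room = s->ssthresh > in_flight ? s->ssthresh - in_flight : 0;
		uint32_t ss = newly_delivered + 1;
		sndcnt = ss < room ? ss : room;
	}
	return sndcnt;
}

int main(void)
{
	struct prr_state s = { .prior_cwnd = 20, .ssthresh = 10,
			       .prr_delivered = 4, .prr_out = 1 };

	printf("sndcnt=%u\n", prr_sndcnt(&s, 18, 2));	/* proportional phase */
	printf("sndcnt=%u\n", prr_sndcnt(&s, 8, 2));	/* slow-start phase */
	return 0;
}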
|
/linux-4.4.14/drivers/infiniband/hw/nes/ |
H A D | nes_context.h | 64 __le32 ssthresh; member in struct:nes_qp_context
|
H A D | nes_cm.c | 2972 nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000); nes_cm_init_tsa_conn()
|
/linux-4.4.14/net/dccp/ccids/ |
H A D | ccid2.h | 54 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
|
H A D | ccid2.c | 716 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ ccid2_hc_tx_init()
|
/linux-4.4.14/drivers/block/aoe/ |
H A D | aoe.h | 143 ushort ssthresh; /* slow start threshold */ member in struct:aoetgt
|
H A D | aoeblk.c | 140 seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh); aoedisk_debugfs_show()
|
H A D | aoecmd.c | 814 t->ssthresh = t->maxout / 2;
      | 1065 if (t->maxout < t->ssthresh) calc_rttavg()
      | 1673 t->ssthresh = t->nframes / 2; aoecmd_wreset()
|
/linux-4.4.14/include/linux/ |
H A D | tcp.h | 292 u32 prior_ssthresh; /* ssthresh saved at recovery start */
|
/linux-4.4.14/include/net/ |
H A D | tcp.h | 860 u32 (*ssthresh)(struct sock *sk); member in struct:tcp_congestion_ops
      | 1020 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
      | 1022 * ssthresh.
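The comment at include/net/tcp.h:1020 ("raise ssthresh to be half-way to cwnd") describes what tcp_current_ssthresh(), the helper sampled by tcp_probe.c above, reports outside of a cwnd-reduction phase: at least (cwnd >> 1) + (cwnd >> 2), i.e. three quarters of cwnd, which lies half-way between a halved ssthresh and cwnd. A small userspace paraphrase of that rule, with an invented function name:

#include <stdint.h>
#include <stdio.h>

static uint32_t current_ssthresh_sketch(uint32_t cwnd, uint32_t ssthresh,
					int in_cwnd_reduction)
{
	if (in_cwnd_reduction)
		return ssthresh;	/* cwnd is already heading down to ssthresh */

	/* (cwnd >> 1) + (cwnd >> 2) == 3/4 of cwnd, i.e. half-way from a
	 * halved ssthresh back up to cwnd. */
	uint32_t halfway = (cwnd >> 1) + (cwnd >> 2);

	return ssthresh > halfway ? ssthresh : halfway;
}

int main(void)
{
	printf("%u\n", current_ssthresh_sketch(40, 20, 0));	/* 30 */
	printf("%u\n", current_ssthresh_sketch(40, 20, 1));	/* 20 */
	return 0;
}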
|
/linux-4.4.14/include/net/sctp/ |
H A D | structs.h | 820 /* ssthresh : The current slow start threshold value. */
      | 821 __u32 ssthresh; member in struct:sctp_transport
|