rtt_us 35 fs/afs/fs_probe.c unsigned int rtt_us;
rtt_us 95 fs/afs/fs_probe.c rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
rtt_us 96 fs/afs/fs_probe.c if (rtt_us < server->probe.rtt) {
rtt_us 97 fs/afs/fs_probe.c server->probe.rtt = rtt_us;
rtt_us 109 fs/afs/fs_probe.c server_index, index, &alist->addrs[index].transport, rtt_us, ret);
rtt_us 34 fs/afs/vl_probe.c unsigned int rtt_us = 0;
rtt_us 95 fs/afs/vl_probe.c rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
rtt_us 96 fs/afs/vl_probe.c if (rtt_us < server->probe.rtt) {
rtt_us 97 fs/afs/vl_probe.c server->probe.rtt = rtt_us;
rtt_us 109 fs/afs/vl_probe.c server_index, index, &alist->addrs[index].transport, rtt_us, ret);
rtt_us 208 include/linux/tcp.h u32 rtt_us; /* Associated RTT */
rtt_us 362 include/linux/tcp.h u32 rtt_us;
rtt_us 1009 include/net/tcp.h s32 rtt_us;
rtt_us 1028 include/net/tcp.h long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
rtt_us 3295 net/ipv4/tcp.c info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
rtt_us 269 net/ipv4/tcp_bbr.c u32 rtt_us;
rtt_us 272 net/ipv4/tcp_bbr.c rtt_us = max(tp->srtt_us >> 3, 1U);
rtt_us 275 net/ipv4/tcp_bbr.c rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
rtt_us 278 net/ipv4/tcp_bbr.c do_div(bw, rtt_us);
rtt_us 947 net/ipv4/tcp_bbr.c if (rs->rtt_us >= 0 &&
rtt_us 948 net/ipv4/tcp_bbr.c (rs->rtt_us <= bbr->min_rtt_us ||
rtt_us 950 net/ipv4/tcp_bbr.c bbr->min_rtt_us = rs->rtt_us;
rtt_us 304 net/ipv4/tcp_cdg.c if (sample->rtt_us <= 0)
rtt_us 316 net/ipv4/tcp_cdg.c ca->rtt.min = min(ca->rtt.min, sample->rtt_us);
rtt_us 324 net/ipv4/tcp_cdg.c ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us);
rtt_us 325 net/ipv4/tcp_cdg.c ca->rtt.max = max(ca->rtt.max, sample->rtt_us);
rtt_us 437 net/ipv4/tcp_cubic.c if (sample->rtt_us < 0)
rtt_us 444 net/ipv4/tcp_cubic.c delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
rtt_us 112 net/ipv4/tcp_htcp.c if (sample->rtt_us > 0)
rtt_us 113 net/ipv4/tcp_htcp.c measure_rtt(sk, usecs_to_jiffies(sample->rtt_us));
rtt_us 89 net/ipv4/tcp_illinois.c s32 rtt_us = sample->rtt_us;
rtt_us 94 net/ipv4/tcp_illinois.c if (rtt_us < 0)
rtt_us 98 net/ipv4/tcp_illinois.c if (rtt_us > RTT_MAX)
rtt_us 99 net/ipv4/tcp_illinois.c rtt_us = RTT_MAX;
rtt_us 102 net/ipv4/tcp_illinois.c if (ca->base_rtt > rtt_us)
rtt_us 103 net/ipv4/tcp_illinois.c ca->base_rtt = rtt_us;
rtt_us 106 net/ipv4/tcp_illinois.c if (ca->max_rtt < rtt_us)
rtt_us 107 net/ipv4/tcp_illinois.c ca->max_rtt = rtt_us;
rtt_us 110 net/ipv4/tcp_illinois.c ca->sum_rtt += rtt_us;
rtt_us 528 net/ipv4/tcp_input.c u32 new_sample = tp->rcv_rtt_est.rtt_us;
rtt_us 555 net/ipv4/tcp_input.c tp->rcv_rtt_est.rtt_us = new_sample;
rtt_us 613 net/ipv4/tcp_input.c if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
rtt_us 2905 net/ipv4/tcp_input.c static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
rtt_us 2910 net/ipv4/tcp_input.c if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
rtt_us 2918 net/ipv4/tcp_input.c rtt_us ? : jiffies_to_usecs(1));
rtt_us 2950 net/ipv4/tcp_input.c rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
rtt_us 2971 net/ipv4/tcp_input.c long rtt_us = -1L;
rtt_us 2974 net/ipv4/tcp_input.c rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
rtt_us 2976 net/ipv4/tcp_input.c tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
rtt_us 3242 net/ipv4/tcp_input.c .rtt_us = sack->rate->rtt_us,
rtt_us 5278 net/ipv4/tcp_input.c rtt = tp->rcv_rtt_est.rtt_us;
rtt_us 271 net/ipv4/tcp_lp.c if (sample->rtt_us > 0)
rtt_us 272 net/ipv4/tcp_lp.c tcp_lp_rtt_sample(sk, sample->rtt_us);
rtt_us 252 net/ipv4/tcp_nv.c if (sample->rtt_us < 0)
rtt_us 275 net/ipv4/tcp_nv.c avg_rtt = (((u64)sample->rtt_us) * nv_rtt_factor +
rtt_us 279 net/ipv4/tcp_nv.c avg_rtt = sample->rtt_us;
rtt_us 284 net/ipv4/tcp_nv.c avg_rtt = sample->rtt_us;
rtt_us 52 net/ipv4/tcp_recovery.c return tp->rack.rtt_us + reo_wnd -
rtt_us 138 net/ipv4/tcp_recovery.c u32 rtt_us;
rtt_us 140 net/ipv4/tcp_recovery.c rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
rtt_us 141 net/ipv4/tcp_recovery.c if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
rtt_us 155 net/ipv4/tcp_recovery.c tp->rack.rtt_us = rtt_us;
rtt_us 116 net/ipv4/tcp_vegas.c if (sample->rtt_us < 0)
rtt_us 120 net/ipv4/tcp_vegas.c vrtt = sample->rtt_us + 1;
rtt_us 79 net/ipv4/tcp_veno.c if (sample->rtt_us < 0)
rtt_us 83 net/ipv4/tcp_veno.c vrtt = sample->rtt_us + 1;
rtt_us 108 net/ipv4/tcp_westwood.c if (sample->rtt_us > 0)
rtt_us 109 net/ipv4/tcp_westwood.c w->rtt = usecs_to_jiffies(sample->rtt_us);
rtt_us 131 net/rxrpc/rtt.c static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
rtt_us 133 net/rxrpc/rtt.c if (rtt_us < 0)
rtt_us 137 net/rxrpc/rtt.c rxrpc_rtt_estimator(peer, rtt_us);
rtt_us 153 net/rxrpc/rtt.c s64 rtt_us;
rtt_us 155 net/rxrpc/rtt.c rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
rtt_us 156 net/rxrpc/rtt.c if (rtt_us < 0)
rtt_us 160 net/rxrpc/rtt.c rxrpc_ack_update_rtt(peer, rtt_us);
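A pattern that explains several of the entries above: TCP keeps its smoothed RTT fields scaled by 8 (tp->srtt_us and tp->rcv_rtt_est.rtt_us both store the estimate left-shifted by 3), which is why readers shift the value back down (tcp.c line 3295, tcp_input.c line 613, tcp_bbr.c line 272) and why the receive-RTT update in tcp_input.c lines 528-555 works on the scaled value. Below is a minimal user-space sketch of that convention, assuming a 1/8-gain EWMA like the kernel's; struct rtt_est and the function names are illustrative stand-ins, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    struct rtt_est {
            uint32_t srtt_x8;       /* smoothed RTT in usec, left-shifted by 3 */
    };

    /* Fold a new measurement in with a 1/8 gain: srtt += (m - srtt) / 8. */
    static void rtt_est_update(struct rtt_est *est, uint32_t mrtt_us)
    {
            if (est->srtt_x8 == 0) {
                    est->srtt_x8 = mrtt_us << 3;    /* first sample seeds the filter */
                    return;
            }
            /* Working on the x8 value keeps the 1/8 gain in integer arithmetic. */
            est->srtt_x8 += mrtt_us - (est->srtt_x8 >> 3);
    }

    static uint32_t rtt_est_read_us(const struct rtt_est *est)
    {
            return est->srtt_x8 >> 3;       /* undo the scaling, as the tcpi_rcv_rtt line does */
    }

    int main(void)
    {
            struct rtt_est est = { 0 };
            uint32_t samples[] = { 40000, 42000, 38000, 41000 };    /* usec */

            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    rtt_est_update(&est, samples[i]);
            printf("smoothed rtt: %u us\n", rtt_est_read_us(&est));
            return 0;
    }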
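The tcp_bbr.c entries at lines 269-278 show that same scaled srtt feeding a bandwidth estimate: the congestion window is divided by the RTT, with a nominal 1 ms used when no RTT sample exists yet. A hedged sketch of that arithmetic follows; it returns plain bytes per second and omits BBR's fixed-point rate scaling, and the function name and units are assumptions for illustration.

    #include <stdint.h>

    #define USEC_PER_MSEC   1000ULL
    #define USEC_PER_SEC    1000000ULL

    /* Rough throughput in bytes/sec from a window of data and a smoothed RTT
     * stored x8, mirroring the "srtt_us >> 3, else nominal 1 ms, then divide"
     * sequence visible in the tcp_bbr.c lines above.  Illustrative only. */
    uint64_t estimate_bw_Bps(uint64_t cwnd_bytes, uint32_t srtt_x8_us)
    {
            uint64_t rtt_us = srtt_x8_us >> 3;

            if (rtt_us == 0)
                    rtt_us = USEC_PER_MSEC;         /* no RTT measured yet */

            return cwnd_bytes * USEC_PER_SEC / rtt_us;
    }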
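The congestion-control entries (cdg, cubic, htcp, illinois, lp, nv, vegas, veno, westwood) share one consumer pattern: the per-ACK sample's rtt_us is negative when no valid measurement was possible for that ACK (for instance, when only retransmitted data was acknowledged), so the sign is checked before the value feeds any min/max/average state. The sketch below captures that pattern under stated assumptions: struct rtt_sample and struct rtt_tracker are made-up types, not the kernel's struct ack_sample or any module's private state.

    #include <stdint.h>

    struct rtt_sample {
            int32_t rtt_us;         /* microseconds, or negative if unavailable */
    };

    struct rtt_tracker {
            uint32_t base_rtt;      /* lowest RTT seen (propagation estimate) */
            uint32_t max_rtt;       /* highest RTT seen (queueing estimate) */
            uint64_t sum_rtt;       /* for averaging over an interval */
            uint32_t cnt_rtt;
    };

    static void rtt_tracker_ack(struct rtt_tracker *t, const struct rtt_sample *s)
    {
            if (s->rtt_us < 0)      /* no valid sample for this ACK: skip it */
                    return;

            uint32_t rtt_us = (uint32_t)s->rtt_us;

            if (t->base_rtt == 0 || rtt_us < t->base_rtt)
                    t->base_rtt = rtt_us;
            if (rtt_us > t->max_rtt)
                    t->max_rtt = rtt_us;
            t->sum_rtt += rtt_us;
            t->cnt_rtt++;
    }

The net/rxrpc/rtt.c entries apply the same guard after deriving rtt_us as a ktime difference between send and response timestamps, discarding negative results before updating the peer's estimator.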