BBR_UNIT  152 net/ipv4/tcp_bbr.c static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
BBR_UNIT  156 net/ipv4/tcp_bbr.c static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
BBR_UNIT  158 net/ipv4/tcp_bbr.c static const int bbr_cwnd_gain = BBR_UNIT * 2;
BBR_UNIT  161 net/ipv4/tcp_bbr.c 	BBR_UNIT * 5 / 4,	/* probe for more available bw */
BBR_UNIT  162 net/ipv4/tcp_bbr.c 	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
BBR_UNIT  163 net/ipv4/tcp_bbr.c 	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
BBR_UNIT  164 net/ipv4/tcp_bbr.c 	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
BBR_UNIT  177 net/ipv4/tcp_bbr.c static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
BBR_UNIT  187 net/ipv4/tcp_bbr.c static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
BBR_UNIT  194 net/ipv4/tcp_bbr.c static const int bbr_extra_acked_gain = BBR_UNIT;
BBR_UNIT  342 net/ipv4/tcp_bbr.c 	bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
BBR_UNIT  446 net/ipv4/tcp_bbr.c 	if (bbr->pacing_gain > BBR_UNIT)	/* increasing inflight */
BBR_UNIT  564 net/ipv4/tcp_bbr.c 	if (bbr->pacing_gain == BBR_UNIT)
BBR_UNIT  575 net/ipv4/tcp_bbr.c 	if (bbr->pacing_gain > BBR_UNIT)
BBR_UNIT  585 net/ipv4/tcp_bbr.c 		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
BBR_UNIT  663 net/ipv4/tcp_bbr.c 	if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
BBR_UNIT  664 net/ipv4/tcp_bbr.c 	    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
BBR_UNIT  669 net/ipv4/tcp_bbr.c 	bbr->pacing_gain = BBR_UNIT;	/* try to avoid drops */
BBR_UNIT  897 net/ipv4/tcp_bbr.c 		bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
BBR_UNIT  901 net/ipv4/tcp_bbr.c 	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
BBR_UNIT  999 net/ipv4/tcp_bbr.c 		BBR_UNIT :
BBR_UNIT 1004 net/ipv4/tcp_bbr.c 	bbr->pacing_gain = BBR_UNIT;
BBR_UNIT 1005 net/ipv4/tcp_bbr.c 	bbr->cwnd_gain = BBR_UNIT;
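
Every hit above is the same fixed-point idiom: in net/ipv4/tcp_bbr.c, BBR_SCALE is 8 and BBR_UNIT is (1 << BBR_SCALE) == 256, so BBR_UNIT represents a gain of 1.0 and expressions like BBR_UNIT * 2885 / 1000 + 1 encode the STARTUP gain of roughly 2/ln(2) ~= 2.885. Below is a minimal userspace sketch (not kernel code) of that arithmetic; the apply_gain() helper is a hypothetical stand-in for the multiply-then-shift that bbr_rate_bytes_per_sec() performs when it applies a gain to a bandwidth estimate.

/*
 * Illustrative sketch of BBR's fixed-point gain arithmetic.
 * BBR_SCALE/BBR_UNIT and the two gain constants match tcp_bbr.c;
 * apply_gain() is a hypothetical helper for this example.
 */
#include <stdio.h>
#include <stdint.h>

#define BBR_SCALE 8                 /* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)   /* 256 == gain of 1.0 */

static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1; /* ~2.885, 2/ln(2) */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;     /* ~0.347, inverse of high_gain */

/* Scale a rate by a BBR_UNIT-based gain: multiply, then shift the scale back out. */
static uint64_t apply_gain(uint64_t rate, int gain)
{
	return rate * gain >> BBR_SCALE;
}

int main(void)
{
	uint64_t bw = 1000000;	/* example bandwidth estimate, bytes/sec */

	printf("BBR_UNIT        = %d\n", BBR_UNIT);
	printf("startup pacing  = %llu\n",
	       (unsigned long long)apply_gain(bw, bbr_high_gain));  /* ~2.885x */
	printf("drain pacing    = %llu\n",
	       (unsigned long long)apply_gain(bw, bbr_drain_gain)); /* ~0.347x */
	printf("cruise pacing   = %llu\n",
	       (unsigned long long)apply_gain(bw, BBR_UNIT));       /* exactly 1.0x */
	return 0;
}

The same convention explains the comparisons in the listing: bbr->pacing_gain > BBR_UNIT means "pacing faster than the estimated bandwidth" (probing, lines 161 and 446 above), == BBR_UNIT means cruising at 1.0x (lines 163-164, 564), and < BBR_UNIT means draining queue (lines 156, 162).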