bbr 212 include/uapi/linux/inet_diag.h struct tcp_bbr_info bbr;
bbr 207 net/ipv4/tcp_bbr.c const struct bbr *bbr = inet_csk_ca(sk);
bbr 209 net/ipv4/tcp_bbr.c return bbr->full_bw_reached;
bbr 215 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 217 net/ipv4/tcp_bbr.c return minmax_get(&bbr->bw);
bbr 223 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 225 net/ipv4/tcp_bbr.c return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
bbr 233 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 235 net/ipv4/tcp_bbr.c return max(bbr->extra_acked[0], bbr->extra_acked[1]);
bbr 267 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 273 net/ipv4/tcp_bbr.c bbr->has_seen_rtt = 1;
bbr 286 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 289 net/ipv4/tcp_bbr.c if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
bbr 321 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 323 net/ipv4/tcp_bbr.c if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
bbr 324 net/ipv4/tcp_bbr.c bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
bbr 326 net/ipv4/tcp_bbr.c bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
bbr 332 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 335 net/ipv4/tcp_bbr.c bbr->idle_restart = 1;
bbr 336 net/ipv4/tcp_bbr.c bbr->ack_epoch_mstamp = tp->tcp_mstamp;
bbr 337 net/ipv4/tcp_bbr.c bbr->ack_epoch_acked = 0;
bbr 341 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_BW)
bbr 343 net/ipv4/tcp_bbr.c else if (bbr->mode == BBR_PROBE_RTT)
bbr 359 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 369 net/ipv4/tcp_bbr.c if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */
bbr 372 net/ipv4/tcp_bbr.c w = (u64)bw * bbr->min_rtt_us;
bbr 394 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 403 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
bbr 437 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 446 net/ipv4/tcp_bbr.c if (bbr->pacing_gain > BBR_UNIT) /* increasing inflight */
bbr 481 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 482 net/ipv4/tcp_bbr.c u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
bbr 494 net/ipv4/tcp_bbr.c bbr->packet_conservation = 1;
bbr 495 net/ipv4/tcp_bbr.c bbr->next_rtt_delivered = tp->delivered; /* start round now */
bbr 500 net/ipv4/tcp_bbr.c cwnd = max(cwnd, bbr->prior_cwnd);
bbr 501 net/ipv4/tcp_bbr.c bbr->packet_conservation = 0;
bbr 503 net/ipv4/tcp_bbr.c bbr->prev_ca_state = state;
bbr 505 net/ipv4/tcp_bbr.c if (bbr->packet_conservation) {
bbr 520 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 546 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
bbr 555 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 557 net/ipv4/tcp_bbr.c tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
bbr 558 net/ipv4/tcp_bbr.c bbr->min_rtt_us;
bbr 564 net/ipv4/tcp_bbr.c if (bbr->pacing_gain == BBR_UNIT)
bbr 575 net/ipv4/tcp_bbr.c if (bbr->pacing_gain > BBR_UNIT)
bbr 578 net/ipv4/tcp_bbr.c inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
bbr 591 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 593 net/ipv4/tcp_bbr.c bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
bbr 594 net/ipv4/tcp_bbr.c bbr->cycle_mstamp = tp->delivered_mstamp;
bbr 601 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 603 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
bbr 609 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
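The entries at tcp_bbr.c:359-372 and 437-446 above are the core BDP math: the max-bw estimate (a fixed-point rate, pkts/uS << BW_SCALE) multiplied by min_rtt_us, then scaled by a gain in BBR_UNIT fixed point. Below is a minimal userspace sketch of that arithmetic using the scaling constants defined near the top of tcp_bbr.c; the standalone framing and the helper name bdp_packets are illustrative, not kernel code.

/* Sketch of the BDP / inflight-target math, not the kernel function. */
#include <stdint.h>
#include <stdio.h>

#define BW_SCALE  24                /* bw is pkts/uS << BW_SCALE */
#define BW_UNIT   (1ULL << BW_SCALE)
#define BBR_SCALE 8                 /* gains are scaled by 1 << 8 */
#define BBR_UNIT  (1 << BBR_SCALE)

/* Hypothetical helper: packets in flight for a given bw, min_rtt, gain. */
static uint32_t bdp_packets(uint64_t bw, uint32_t min_rtt_us, int gain)
{
	uint64_t w = bw * min_rtt_us;   /* bw * RTprop, still fixed-point */

	/* Apply the gain, then round up out of the BW_UNIT fixed point. */
	return (uint32_t)((((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT);
}

int main(void)
{
	/* Example: 1 pkt/us (~12 Gbit/s at 1500 B packets), 10 ms min RTT,
	 * gain 2.0 -> inflight target of 20000 packets. */
	uint64_t bw = 1ULL << BW_SCALE;

	printf("inflight target: %u packets\n",
	       bdp_packets(bw, 10000, BBR_UNIT * 2));
	return 0;
}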
bbr 611 net/ipv4/tcp_bbr.c bbr->mode = BBR_STARTUP;
bbr 616 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 618 net/ipv4/tcp_bbr.c bbr->mode = BBR_PROBE_BW;
bbr 619 net/ipv4/tcp_bbr.c bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
bbr 635 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 637 net/ipv4/tcp_bbr.c bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
bbr 638 net/ipv4/tcp_bbr.c bbr->lt_last_delivered = tp->delivered;
bbr 639 net/ipv4/tcp_bbr.c bbr->lt_last_lost = tp->lost;
bbr 640 net/ipv4/tcp_bbr.c bbr->lt_rtt_cnt = 0;
bbr 646 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 648 net/ipv4/tcp_bbr.c bbr->lt_bw = 0;
bbr 649 net/ipv4/tcp_bbr.c bbr->lt_use_bw = 0;
bbr 650 net/ipv4/tcp_bbr.c bbr->lt_is_sampling = false;
bbr 657 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 660 net/ipv4/tcp_bbr.c if (bbr->lt_bw) { /* do we have bw from a previous interval? */
bbr 662 net/ipv4/tcp_bbr.c diff = abs(bw - bbr->lt_bw);
bbr 663 net/ipv4/tcp_bbr.c if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
bbr 667 net/ipv4/tcp_bbr.c bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
bbr 668 net/ipv4/tcp_bbr.c bbr->lt_use_bw = 1;
bbr 669 net/ipv4/tcp_bbr.c bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
bbr 670 net/ipv4/tcp_bbr.c bbr->lt_rtt_cnt = 0;
bbr 674 net/ipv4/tcp_bbr.c bbr->lt_bw = bw;
bbr 688 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 693 net/ipv4/tcp_bbr.c if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
bbr 694 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
bbr 695 net/ipv4/tcp_bbr.c ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
bbr 706 net/ipv4/tcp_bbr.c if (!bbr->lt_is_sampling) {
bbr 710 net/ipv4/tcp_bbr.c bbr->lt_is_sampling = true;
bbr 719 net/ipv4/tcp_bbr.c if (bbr->round_start)
bbr 720 net/ipv4/tcp_bbr.c bbr->lt_rtt_cnt++; /* count round trips in this interval */
bbr 721 net/ipv4/tcp_bbr.c if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
bbr 723 net/ipv4/tcp_bbr.c if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
bbr 736 net/ipv4/tcp_bbr.c lost = tp->lost - bbr->lt_last_lost;
bbr 737 net/ipv4/tcp_bbr.c delivered = tp->delivered - bbr->lt_last_delivered;
bbr 743 net/ipv4/tcp_bbr.c t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
bbr 761 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 764 net/ipv4/tcp_bbr.c bbr->round_start = 0;
bbr 769 net/ipv4/tcp_bbr.c if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
bbr 770 net/ipv4/tcp_bbr.c bbr->next_rtt_delivered = tp->delivered;
bbr 771 net/ipv4/tcp_bbr.c bbr->rtt_cnt++;
bbr 772 net/ipv4/tcp_bbr.c bbr->round_start = 1;
bbr 773 net/ipv4/tcp_bbr.c bbr->packet_conservation = 0;
bbr 797 net/ipv4/tcp_bbr.c minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
bbr 818 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 825 net/ipv4/tcp_bbr.c if (bbr->round_start) {
bbr 826 net/ipv4/tcp_bbr.c bbr->extra_acked_win_rtts = min(0x1F,
bbr 827 net/ipv4/tcp_bbr.c bbr->extra_acked_win_rtts + 1);
bbr 828 net/ipv4/tcp_bbr.c if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
bbr 829 net/ipv4/tcp_bbr.c bbr->extra_acked_win_rtts = 0;
bbr 830 net/ipv4/tcp_bbr.c bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
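The entries at tcp_bbr.c:769-773 show how BBR delimits packet-timed rounds via delivered-count bookkeeping, and line 797 feeds each round into the windowed max-bw filter. Below is a simplified sketch of the round-detection step; the struct and function names are illustrative stand-ins for the relevant struct bbr and struct rate_sample fields, and the kernel's wraparound-safe before() compare is reduced to a plain >= here.

/* Sketch of BBR's packet-timed round detection, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

struct round_state {
	uint32_t next_rtt_delivered;  /* tp->delivered at round start */
	uint32_t rtt_cnt;             /* rounds elapsed; feeds the bw filter */
	bool     round_start;
};

/* prior_delivered: tp->delivered when the just-ACKed packet was sent;
 * delivered: the current tp->delivered count. */
static void update_round(struct round_state *st,
			 uint32_t prior_delivered, uint32_t delivered)
{
	st->round_start = false;
	if (prior_delivered >= st->next_rtt_delivered) {
		/* The ACKed packet was sent after the current round began,
		 * so its ACK closes the round: start the next one now. */
		st->next_rtt_delivered = delivered;
		st->rtt_cnt++;
		st->round_start = true;
	}
}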
bbr 832 net/ipv4/tcp_bbr.c bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
bbr 838 net/ipv4/tcp_bbr.c bbr->ack_epoch_mstamp);
bbr 845 net/ipv4/tcp_bbr.c if (bbr->ack_epoch_acked <= expected_acked ||
bbr 846 net/ipv4/tcp_bbr.c (bbr->ack_epoch_acked + rs->acked_sacked >=
bbr 848 net/ipv4/tcp_bbr.c bbr->ack_epoch_acked = 0;
bbr 849 net/ipv4/tcp_bbr.c bbr->ack_epoch_mstamp = tp->delivered_mstamp;
bbr 854 net/ipv4/tcp_bbr.c bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
bbr 855 net/ipv4/tcp_bbr.c bbr->ack_epoch_acked + rs->acked_sacked);
bbr 856 net/ipv4/tcp_bbr.c extra_acked = bbr->ack_epoch_acked - expected_acked;
bbr 858 net/ipv4/tcp_bbr.c if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
bbr 859 net/ipv4/tcp_bbr.c bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
bbr 873 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 876 net/ipv4/tcp_bbr.c if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
bbr 879 net/ipv4/tcp_bbr.c bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
bbr 881 net/ipv4/tcp_bbr.c bbr->full_bw = bbr_max_bw(sk);
bbr 882 net/ipv4/tcp_bbr.c bbr->full_bw_cnt = 0;
bbr 885 net/ipv4/tcp_bbr.c ++bbr->full_bw_cnt;
bbr 886 net/ipv4/tcp_bbr.c bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
bbr 892 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 894 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
bbr 895 net/ipv4/tcp_bbr.c bbr->mode = BBR_DRAIN; /* drain queue we created */
bbr 899 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_DRAIN &&
bbr 908 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 910 net/ipv4/tcp_bbr.c if (!(bbr->probe_rtt_done_stamp &&
bbr 911 net/ipv4/tcp_bbr.c after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
bbr 914 net/ipv4/tcp_bbr.c bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
bbr 915 net/ipv4/tcp_bbr.c tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
bbr 941 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 946 net/ipv4/tcp_bbr.c bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
bbr 948 net/ipv4/tcp_bbr.c (rs->rtt_us <= bbr->min_rtt_us ||
bbr 950 net/ipv4/tcp_bbr.c bbr->min_rtt_us = rs->rtt_us;
bbr 951 net/ipv4/tcp_bbr.c bbr->min_rtt_stamp = tcp_jiffies32;
bbr 955 net/ipv4/tcp_bbr.c !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
bbr 956 net/ipv4/tcp_bbr.c bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
bbr 958 net/ipv4/tcp_bbr.c bbr->probe_rtt_done_stamp = 0;
bbr 961 net/ipv4/tcp_bbr.c if (bbr->mode == BBR_PROBE_RTT) {
bbr 966 net/ipv4/tcp_bbr.c if (!bbr->probe_rtt_done_stamp &&
bbr 968 net/ipv4/tcp_bbr.c bbr->probe_rtt_done_stamp = tcp_jiffies32 +
bbr 970 net/ipv4/tcp_bbr.c bbr->probe_rtt_round_done = 0;
bbr 971 net/ipv4/tcp_bbr.c bbr->next_rtt_delivered = tp->delivered;
bbr 972 net/ipv4/tcp_bbr.c } else if (bbr->probe_rtt_done_stamp) {
bbr 973 net/ipv4/tcp_bbr.c if (bbr->round_start)
bbr 974 net/ipv4/tcp_bbr.c bbr->probe_rtt_round_done = 1;
bbr 975 net/ipv4/tcp_bbr.c if (bbr->probe_rtt_round_done)
bbr 981 net/ipv4/tcp_bbr.c bbr->idle_restart = 0;
bbr 986 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 988 net/ipv4/tcp_bbr.c switch (bbr->mode) {
bbr 990 net/ipv4/tcp_bbr.c bbr->pacing_gain = bbr_high_gain;
bbr 991 net/ipv4/tcp_bbr.c bbr->cwnd_gain = bbr_high_gain;
bbr 994 net/ipv4/tcp_bbr.c bbr->pacing_gain = bbr_drain_gain; /* slow, to drain */
bbr 995 net/ipv4/tcp_bbr.c bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
bbr 998 net/ipv4/tcp_bbr.c bbr->pacing_gain = (bbr->lt_use_bw ?
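Lines 873-886 above are the full-pipe check that ends STARTUP: if the max-bw estimate fails to grow by at least bbr_full_bw_thresh (~25%) for bbr_full_bw_cnt (3) consecutive non-app-limited round starts, bandwidth has plateaued. A standalone sketch of that plateau test follows; the struct and function names are illustrative, not the kernel's.

/* Sketch of the STARTUP "full pipe" plateau test, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

#define BBR_SCALE      8
#define FULL_BW_THRESH ((1 << BBR_SCALE) * 5 / 4)  /* growth threshold: 1.25x */
#define FULL_BW_CNT    3                           /* flat rounds before "full" */

struct full_bw_state {
	uint64_t full_bw;        /* bw baseline from the last growth round */
	uint32_t full_bw_cnt;    /* consecutive rounds without >=25% growth */
	bool     full_bw_reached;
};

/* Call once per non-app-limited round start with the current max-bw sample. */
static void check_full_bw_reached(struct full_bw_state *s, uint64_t max_bw)
{
	uint64_t bw_thresh = s->full_bw * FULL_BW_THRESH >> BBR_SCALE;

	if (s->full_bw_reached)
		return;
	if (max_bw >= bw_thresh) {   /* still growing: move baseline, reset */
		s->full_bw = max_bw;
		s->full_bw_cnt = 0;
		return;
	}
	if (++s->full_bw_cnt >= FULL_BW_CNT)
		s->full_bw_reached = true;  /* plateau: time to leave STARTUP */
}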
bbr 1000 net/ipv4/tcp_bbr.c bbr_pacing_gain[bbr->cycle_idx]);
bbr 1001 net/ipv4/tcp_bbr.c bbr->cwnd_gain = bbr_cwnd_gain;
bbr 1004 net/ipv4/tcp_bbr.c bbr->pacing_gain = BBR_UNIT;
bbr 1005 net/ipv4/tcp_bbr.c bbr->cwnd_gain = BBR_UNIT;
bbr 1008 net/ipv4/tcp_bbr.c WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
bbr 1026 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 1032 net/ipv4/tcp_bbr.c bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
bbr 1033 net/ipv4/tcp_bbr.c bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
bbr 1039 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 1041 net/ipv4/tcp_bbr.c bbr->prior_cwnd = 0;
bbr 1043 net/ipv4/tcp_bbr.c bbr->rtt_cnt = 0;
bbr 1044 net/ipv4/tcp_bbr.c bbr->next_rtt_delivered = 0;
bbr 1045 net/ipv4/tcp_bbr.c bbr->prev_ca_state = TCP_CA_Open;
bbr 1046 net/ipv4/tcp_bbr.c bbr->packet_conservation = 0;
bbr 1048 net/ipv4/tcp_bbr.c bbr->probe_rtt_done_stamp = 0;
bbr 1049 net/ipv4/tcp_bbr.c bbr->probe_rtt_round_done = 0;
bbr 1050 net/ipv4/tcp_bbr.c bbr->min_rtt_us = tcp_min_rtt(tp);
bbr 1051 net/ipv4/tcp_bbr.c bbr->min_rtt_stamp = tcp_jiffies32;
bbr 1053 net/ipv4/tcp_bbr.c minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
bbr 1055 net/ipv4/tcp_bbr.c bbr->has_seen_rtt = 0;
bbr 1058 net/ipv4/tcp_bbr.c bbr->round_start = 0;
bbr 1059 net/ipv4/tcp_bbr.c bbr->idle_restart = 0;
bbr 1060 net/ipv4/tcp_bbr.c bbr->full_bw_reached = 0;
bbr 1061 net/ipv4/tcp_bbr.c bbr->full_bw = 0;
bbr 1062 net/ipv4/tcp_bbr.c bbr->full_bw_cnt = 0;
bbr 1063 net/ipv4/tcp_bbr.c bbr->cycle_mstamp = 0;
bbr 1064 net/ipv4/tcp_bbr.c bbr->cycle_idx = 0;
bbr 1068 net/ipv4/tcp_bbr.c bbr->ack_epoch_mstamp = tp->tcp_mstamp;
bbr 1069 net/ipv4/tcp_bbr.c bbr->ack_epoch_acked = 0;
bbr 1070 net/ipv4/tcp_bbr.c bbr->extra_acked_win_rtts = 0;
bbr 1071 net/ipv4/tcp_bbr.c bbr->extra_acked_win_idx = 0;
bbr 1072 net/ipv4/tcp_bbr.c bbr->extra_acked[0] = 0;
bbr 1073 net/ipv4/tcp_bbr.c bbr->extra_acked[1] = 0;
bbr 1089 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 1091 net/ipv4/tcp_bbr.c bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
bbr 1092 net/ipv4/tcp_bbr.c bbr->full_bw_cnt = 0;
bbr 1110 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 1114 net/ipv4/tcp_bbr.c memset(&info->bbr, 0, sizeof(info->bbr));
bbr 1115 net/ipv4/tcp_bbr.c info->bbr.bbr_bw_lo = (u32)bw;
bbr 1116 net/ipv4/tcp_bbr.c info->bbr.bbr_bw_hi = (u32)(bw >> 32);
bbr 1117 net/ipv4/tcp_bbr.c info->bbr.bbr_min_rtt = bbr->min_rtt_us;
bbr 1118 net/ipv4/tcp_bbr.c info->bbr.bbr_pacing_gain = bbr->pacing_gain;
bbr 1119 net/ipv4/tcp_bbr.c info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
bbr 1121 net/ipv4/tcp_bbr.c return sizeof(info->bbr);
bbr 1128 net/ipv4/tcp_bbr.c struct bbr *bbr = inet_csk_ca(sk);
bbr 1133 net/ipv4/tcp_bbr.c bbr->prev_ca_state = TCP_CA_Loss;
bbr 1134 net/ipv4/tcp_bbr.c bbr->full_bw = 0;
bbr 1135 net/ipv4/tcp_bbr.c bbr->round_start = 1; /* treat RTO like end of a round */
bbr 1157 net/ipv4/tcp_bbr.c BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
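Lines 1110-1121 show the get_info hook exporting the bw estimate (in bytes per second, split across two u32 fields), min_rtt, and the two gains into struct tcp_bbr_info. A userspace sketch of reading those stats back through the TCP_CC_INFO socket option follows, assuming a connected TCP socket whose congestion control is set to bbr and headers recent enough to define TCP_CC_INFO and union tcp_cc_info; error handling is minimal.

/* Sketch: read the BBR stats exported by tcp_bbr.c's get_info hook. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>        /* TCP_CC_INFO */
#include <linux/inet_diag.h>    /* union tcp_cc_info, struct tcp_bbr_info */

static void print_bbr_info(int fd)
{
	union tcp_cc_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) < 0) {
		perror("getsockopt(TCP_CC_INFO)");
		return;
	}
	/* The kernel splits the 64-bit bw estimate into bbr_bw_lo/hi. */
	uint64_t bw = ((uint64_t)info.bbr.bbr_bw_hi << 32) |
		      info.bbr.bbr_bw_lo;

	/* Gains are in BBR_UNIT (1 << 8) fixed point, hence "/256". */
	printf("bw: %llu B/s, min_rtt: %u us, pacing_gain: %u/256\n",
	       (unsigned long long)bw, info.bbr.bbr_min_rtt,
	       info.bbr.bbr_pacing_gain);
}

This is also why line 1157's BUILD_BUG_ON matters: all of the per-connection state read above lives inside the inet_csk congestion-control scratch area, so struct bbr must fit in ICSK_CA_PRIV_SIZE.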