Lines Matching refs:flag
1134 int flag; member
1235 state->flag |= FLAG_ORIG_SACK_ACKED; in tcp_sacktag_one()
1253 state->flag |= FLAG_DATA_SACKED; in tcp_sacktag_one()
1653 state.flag = 0; in tcp_sacktag_write_queue()
1666 state.flag |= FLAG_DSACKING_ACK; in tcp_sacktag_write_queue()
1838 return state.flag; in tcp_sacktag_write_queue()
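Lines 1134-1838 above cover the SACK-tagging path: flag appears once as a state member (1134) and otherwise as a bit mask that tcp_sacktag_write_queue() clears (1653), ORs bits into as SACK blocks are processed (1235, 1253, 1666), and finally returns to its caller (1838). For orientation, here is a minimal sketch of the single-bit FLAG_* constants this listing refers to; the names follow net/ipv4/tcp_input.c, but the numeric values are illustrative and may not match any particular kernel version.

/* Hedged sketch of the ACK-state flag bits referenced throughout this
 * listing.  Names follow tcp_input.c; the values below are illustrative. */
#define FLAG_DATA               0x01    /* incoming segment carried data            */
#define FLAG_WIN_UPDATE         0x02    /* ACK updated the send window              */
#define FLAG_DATA_ACKED         0x04    /* ACK acknowledged new data                */
#define FLAG_RETRANS_DATA_ACKED 0x08    /* ...some of it previously retransmitted   */
#define FLAG_SYN_ACKED          0x10    /* ACK acknowledged the SYN                 */
#define FLAG_DATA_SACKED        0x20    /* new data was SACKed                      */
#define FLAG_ECE                0x40    /* ECE bit was set in this ACK              */
#define FLAG_SLOWPATH           0x100   /* do not skip RFC checks for window update */
#define FLAG_ORIG_SACK_ACKED    0x200   /* never-retransmitted data was (s)acked    */
#define FLAG_SND_UNA_ADVANCED   0x400   /* snd_una moved forward                    */
#define FLAG_DSACKING_ACK       0x800   /* SACK blocks contained D-SACK info        */
#define FLAG_SACK_RENEGING      0x1000  /* receiver reneged on SACKed data          */
#define FLAG_UPDATE_TS_RECENT   0x2000  /* refresh ts_recent from this segment      */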
2003 static bool tcp_check_sack_reneging(struct sock *sk, int flag) in tcp_check_sack_reneging() argument
2005 if (flag & FLAG_SACK_RENEGING) { in tcp_check_sack_reneging()
2042 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) in tcp_pause_early_retransmit() argument
2052 (flag & FLAG_ECE) || !tp->srtt_us) in tcp_pause_early_retransmit()
2159 static bool tcp_time_to_recover(struct sock *sk, int flag) in tcp_time_to_recover() argument
2203 return !tcp_pause_early_retransmit(sk, flag); in tcp_time_to_recover()
2573 static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) in tcp_try_to_open() argument
2582 if (flag & FLAG_ECE) in tcp_try_to_open()
2696 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) in tcp_process_loss() argument
2701 if ((flag & FLAG_SND_UNA_ADVANCED) && in tcp_process_loss()
2709 if ((flag & FLAG_ORIG_SACK_ACKED) && in tcp_process_loss()
2714 if (flag & FLAG_DATA_SACKED || is_dupack) in tcp_process_loss()
2716 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { in tcp_process_loss()
2737 else if (flag & FLAG_SND_UNA_ADVANCED) in tcp_process_loss()
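Lines 2003-2737 pass the accumulated mask read-only into the recovery heuristics: tcp_check_sack_reneging() keys off FLAG_SACK_RENEGING (2005), tcp_pause_early_retransmit() refuses to delay when ECE was seen or no RTT estimate exists (2052), tcp_time_to_recover() (2159) uses that answer to decide whether to enter recovery, and tcp_process_loss() distinguishes its cases by combinations of FLAG_SND_UNA_ADVANCED, FLAG_ORIG_SACK_ACKED and FLAG_DATA_SACKED. A small hedged sketch of that style of single-bit predicate, loosely following the check at line 2052; the function name and srtt_us parameter are stand-ins, not kernel API.

#include <stdbool.h>

#define FLAG_ECE 0x40   /* illustrative value, as in the sketch above */

/* Loosely modelled on line 2052: never delay a (re)transmission when the
 * peer signalled congestion (ECE) or when we have no RTT estimate to base
 * a delay on. */
static bool may_pause_early_retransmit_sketch(int flag, unsigned long srtt_us)
{
        if ((flag & FLAG_ECE) || !srtt_us)
                return false;
        return true;    /* the kernel applies further timing checks here */
}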
2790 bool is_dupack, int flag) in tcp_fastretrans_alert() argument
2794 bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && in tcp_fastretrans_alert()
2805 if (flag & FLAG_ECE) in tcp_fastretrans_alert()
2809 if (tcp_check_sack_reneging(sk, flag)) in tcp_fastretrans_alert()
2844 if (!(flag & FLAG_SND_UNA_ADVANCED)) { in tcp_fastretrans_alert()
2860 tcp_process_loss(sk, flag, is_dupack); in tcp_fastretrans_alert()
2866 if (flag & FLAG_SND_UNA_ADVANCED) in tcp_fastretrans_alert()
2875 if (!tcp_time_to_recover(sk, flag)) { in tcp_fastretrans_alert()
2876 tcp_try_to_open(sk, flag, prior_unsacked); in tcp_fastretrans_alert()
2892 tcp_enter_recovery(sk, (flag & FLAG_ECE)); in tcp_fastretrans_alert()
2902 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, in tcp_ack_update_rtt() argument
2912 if (flag & FLAG_RETRANS_DATA_ACKED) in tcp_ack_update_rtt()
2925 flag & FLAG_ACKED) in tcp_ack_update_rtt()
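Lines 2902-2925 use flag to guard RTT sampling: tcp_ack_update_rtt() discards the sequence-based sample when FLAG_RETRANS_DATA_ACKED is set, because an ACK that covers retransmitted data gives an ambiguous measurement (Karn's rule, RFC 6298), and only takes the timestamp-based fallback when FLAG_ACKED allows it. A hedged paraphrase of that selection logic; the helper name is a stand-in, not kernel API.

#define FLAG_RETRANS_DATA_ACKED 0x08    /* illustrative value, as above */

/* Pick the RTT sample to feed the estimator, in the spirit of
 * tcp_ack_update_rtt().  A negative value means "no sample this ACK". */
static long pick_rtt_sample_us_sketch(int flag, long seq_rtt_us, long sack_rtt_us)
{
        /* Karn's rule: an ACK covering retransmitted data yields an
         * ambiguous sequence-based sample, so discard it. */
        if (flag & FLAG_RETRANS_DATA_ACKED)
                seq_rtt_us = -1;

        /* Fall back to a SACK-derived sample when one is available. */
        if (seq_rtt_us < 0)
                seq_rtt_us = sack_rtt_us;

        return seq_rtt_us;
}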
3071 int flag = 0; in tcp_clean_rtx_queue() local
3102 flag |= FLAG_RETRANS_DATA_ACKED; in tcp_clean_rtx_queue()
3111 flag |= FLAG_ORIG_SACK_ACKED; in tcp_clean_rtx_queue()
3130 flag |= FLAG_DATA_ACKED; in tcp_clean_rtx_queue()
3132 flag |= FLAG_SYN_ACKED; in tcp_clean_rtx_queue()
3151 flag |= FLAG_SACK_RENEGING; in tcp_clean_rtx_queue()
3159 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us); in tcp_clean_rtx_queue()
3161 if (flag & FLAG_ACKED) { in tcp_clean_rtx_queue()
3224 return flag; in tcp_clean_rtx_queue()
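Lines 3071-3224 show the other recurring pattern: tcp_clean_rtx_queue() starts from a local flag of zero (3071), ORs in FLAG_RETRANS_DATA_ACKED, FLAG_ORIG_SACK_ACKED, FLAG_DATA_ACKED, FLAG_SYN_ACKED or FLAG_SACK_RENEGING as it walks the retransmit queue (3102-3151), feeds the mask to the RTT code (3159), and returns it (3224) so that tcp_ack() can OR it into its own accumulator (3565). A stripped-down, hypothetical sketch of that accumulate-and-return idiom; struct acked_skb and its fields are stand-ins, not kernel types.

#include <stdbool.h>

#define FLAG_DATA_ACKED         0x04    /* illustrative values, as above */
#define FLAG_RETRANS_DATA_ACKED 0x08

struct acked_skb {
        bool fully_acked;               /* entirely covered by this ACK?  */
        bool was_retransmitted;         /* had it been retransmitted?     */
        struct acked_skb *next;
};

/* Walk an (illustrative) retransmit queue and report what this ACK did,
 * mirroring how tcp_clean_rtx_queue() builds its return value. */
static int clean_rtx_queue_sketch(const struct acked_skb *skb)
{
        int flag = 0;

        for (; skb && skb->fully_acked; skb = skb->next) {
                flag |= FLAG_DATA_ACKED;
                if (skb->was_retransmitted)
                        flag |= FLAG_RETRANS_DATA_ACKED;
        }
        return flag;    /* caller: flag |= clean_rtx_queue_sketch(...);  */
}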
3248 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) in tcp_ack_is_dubious() argument
3250 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || in tcp_ack_is_dubious()
3255 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) in tcp_may_raise_cwnd() argument
3267 return flag & FLAG_FORWARD_PROGRESS; in tcp_may_raise_cwnd()
3269 return flag & FLAG_DATA_ACKED; in tcp_may_raise_cwnd()
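Lines 3248-3269 test composite masks rather than single bits: FLAG_NOT_DUP, FLAG_CA_ALERT and FLAG_FORWARD_PROGRESS group related conditions so that tcp_ack_is_dubious() and tcp_may_raise_cwnd() can ask one question of the whole mask. Below is a sketch of plausible composite definitions (values illustrative, as before) and a predicate in the spirit of line 3250; the kernel's tcp_ack_is_dubious() additionally consults the socket's congestion state, which is omitted here.

#include <stdbool.h>

#define FLAG_DATA               0x01    /* illustrative values, as above */
#define FLAG_WIN_UPDATE         0x02
#define FLAG_DATA_ACKED         0x04
#define FLAG_SYN_ACKED          0x10
#define FLAG_DATA_SACKED        0x20
#define FLAG_ECE                0x40
#define FLAG_SND_UNA_ADVANCED   0x400

/* Composite masks in the style of tcp_input.c (a sketch, not verbatim). */
#define FLAG_ACKED              (FLAG_DATA_ACKED | FLAG_SYN_ACKED)
#define FLAG_NOT_DUP            (FLAG_DATA | FLAG_WIN_UPDATE | FLAG_ACKED)
#define FLAG_CA_ALERT           (FLAG_DATA_SACKED | FLAG_ECE)
#define FLAG_FORWARD_PROGRESS   (FLAG_DATA_ACKED | FLAG_SND_UNA_ADVANCED)

/* An ACK is "dubious" when it looks like a duplicate or carries
 * congestion-relevant signals (newly SACKed data or ECE). */
static bool ack_is_dubious_sketch(int flag)
{
        return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT);
}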
3315 int flag = 0; in tcp_ack_update_window() local
3322 flag |= FLAG_WIN_UPDATE; in tcp_ack_update_window()
3343 return flag; in tcp_ack_update_window()
3429 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) in tcp_process_tlp_ack() argument
3436 if (flag & FLAG_DSACKING_ACK) { in tcp_process_tlp_ack()
3449 } else if (!(flag & (FLAG_SND_UNA_ADVANCED | in tcp_process_tlp_ack()
3465 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) in tcp_ack() argument
3505 flag |= FLAG_SND_UNA_ADVANCED; in tcp_ack()
3514 if (flag & FLAG_UPDATE_TS_RECENT) in tcp_ack()
3517 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { in tcp_ack()
3524 flag |= FLAG_WIN_UPDATE; in tcp_ack()
3533 flag |= FLAG_DATA; in tcp_ack()
3537 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); in tcp_ack()
3540 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3544 flag |= FLAG_ECE; in tcp_ack()
3548 if (flag & FLAG_WIN_UPDATE) in tcp_ack()
3565 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, in tcp_ack()
3570 if (tcp_may_raise_cwnd(sk, flag)) in tcp_ack()
3573 if (tcp_ack_is_dubious(sk, flag)) { in tcp_ack()
3574 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); in tcp_ack()
3576 is_dupack, flag); in tcp_ack()
3579 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3581 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { in tcp_ack()
3594 if (flag & FLAG_DSACKING_ACK) in tcp_ack()
3596 is_dupack, flag); in tcp_ack()
3605 tcp_process_tlp_ack(sk, ack, flag); in tcp_ack()
3617 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, in tcp_ack()
3620 is_dupack, flag); in tcp_ack()
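Lines 3465-3620 show the full life cycle of flag inside tcp_ack(): the caller seeds it (e.g. FLAG_SLOWPATH, FLAG_UPDATE_TS_RECENT), tcp_ack() ORs in its own observations (FLAG_SND_UNA_ADVANCED, FLAG_DATA, FLAG_WIN_UPDATE, FLAG_ECE) and the masks returned by tcp_ack_update_window(), tcp_sacktag_write_queue() and tcp_clean_rtx_queue(), and the combined value then drives tcp_may_raise_cwnd(), tcp_ack_is_dubious(), tcp_fastretrans_alert() and tcp_process_tlp_ack(). A heavily simplified, hypothetical skeleton of that flow; every name ending in _sketch and the struct ack_info type are stand-ins, not kernel API.

#define FLAG_DATA               0x01    /* illustrative values, as above */
#define FLAG_WIN_UPDATE         0x02
#define FLAG_SND_UNA_ADVANCED   0x400

struct ack_info {
        int carried_data;               /* did the segment carry payload? */
        int window_updated;             /* did it change the send window? */
        int snd_una_advanced;           /* did it acknowledge new data?   */
};

static int sacktag_sketch(void)             { return 0; }  /* cf. tcp_sacktag_write_queue() */
static int clean_rtx_sketch(void)           { return 0; }  /* cf. tcp_clean_rtx_queue()     */
static void fastretrans_alert_sketch(int f) { (void)f; }   /* cf. tcp_fastretrans_alert()   */

/* Accumulate flag bits from local observations and helper return values,
 * then hand the combined mask to the loss-recovery machinery. */
static void ack_sketch(const struct ack_info *ai, int flag)
{
        if (ai->snd_una_advanced)
                flag |= FLAG_SND_UNA_ADVANCED;
        if (ai->carried_data)
                flag |= FLAG_DATA;
        if (ai->window_updated)
                flag |= FLAG_WIN_UPDATE;

        flag |= sacktag_sketch();       /* SACK scoreboard updates */
        flag |= clean_rtx_sketch();     /* newly acknowledged data */

        fastretrans_alert_sketch(flag); /* loss/recovery decisions */
}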