rcv_nxt           277 arch/sparc/include/asm/vio.h 	u64			rcv_nxt;
rcv_nxt           136 arch/sparc/kernel/ldc.c 	u32				rcv_nxt;
rcv_nxt           437 arch/sparc/kernel/ldc.c 		lp->rcv_nxt = 0;
rcv_nxt           476 arch/sparc/kernel/ldc.c 		p->u.r.ackid = lp->rcv_nxt;
rcv_nxt           500 arch/sparc/kernel/ldc.c 	p->u.r.ackid = lp->rcv_nxt;
rcv_nxt           681 arch/sparc/kernel/ldc.c 	lp->rcv_nxt = p->seqid;
rcv_nxt           709 arch/sparc/kernel/ldc.c 	return lp->rcv_nxt + 1 == seqid;
rcv_nxt           722 arch/sparc/kernel/ldc.c 	lp->rcv_nxt = p->seqid;
rcv_nxt          1496 arch/sparc/kernel/ldc.c 		lp->rcv_nxt, lp->snd_nxt);
rcv_nxt          1647 arch/sparc/kernel/ldc.c 		lp->rcv_nxt = first_frag->seqid - 1;
rcv_nxt          1731 arch/sparc/kernel/ldc.c 		p->u.r.ackid = lp->rcv_nxt;
rcv_nxt          1777 arch/sparc/kernel/ldc.c 		       lp->rcv_nxt);
rcv_nxt          1792 arch/sparc/kernel/ldc.c 		lp->rcv_nxt = p->seqid;
rcv_nxt          1887 arch/sparc/kernel/ldc.c 		lp->rcv_nxt = first_frag->seqid - 1;
rcv_nxt           389 drivers/block/drbd/drbd_debugfs.c 		int answ = tp->rcv_nxt - tp->copied_seq;
rcv_nxt           480 drivers/crypto/chelsio/chtls/chtls.h 		     u32 snd_nxt, u32 rcv_nxt);
rcv_nxt           220 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
rcv_nxt          1488 drivers/crypto/chelsio/chtls/chtls_cm.c 	    tp->copied_seq != tp->rcv_nxt) {
rcv_nxt          1545 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
rcv_nxt          1547 drivers/crypto/chelsio/chtls/chtls_cm.c 		     tp->urg_seq - tp->rcv_nxt < skb->len))
rcv_nxt          1549 drivers/crypto/chelsio/chtls/chtls_cm.c 			       skb->data[tp->urg_seq - tp->rcv_nxt];
rcv_nxt          1553 drivers/crypto/chelsio/chtls/chtls_cm.c 		csk->delack_seq = tp->rcv_nxt;
rcv_nxt          1557 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp->rcv_nxt += skb->len;
rcv_nxt          1610 drivers/crypto/chelsio/chtls/chtls_cm.c 		     tp->rcv_nxt < skb->len))
rcv_nxt          1612 drivers/crypto/chelsio/chtls/chtls_cm.c 			       skb->data[tp->urg_seq - tp->rcv_nxt];
rcv_nxt          1668 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp->rcv_nxt +=
rcv_nxt          1709 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp->rcv_nxt++;
rcv_nxt          1991 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
rcv_nxt           160 drivers/crypto/chelsio/chtls/chtls_io.c 		     u32 snd_nxt, u32 rcv_nxt)
rcv_nxt           190 drivers/crypto/chelsio/chtls/chtls_io.c 	FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
rcv_nxt           644 drivers/crypto/chelsio/chtls/chtls_io.c 						      tp->rcv_nxt);
rcv_nxt          4015 drivers/infiniband/hw/cxgb4/cm.c 	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
rcv_nxt           494 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
rcv_nxt          2355 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt++;
rcv_nxt          2362 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt++;
rcv_nxt          2369 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt++;
rcv_nxt          2380 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt++;
rcv_nxt          2390 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt++;
rcv_nxt          2556 drivers/infiniband/hw/i40iw/i40iw_cm.c 	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
rcv_nxt          2564 drivers/infiniband/hw/i40iw/i40iw_cm.c 	else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
rcv_nxt          2610 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
rcv_nxt          2676 drivers/infiniband/hw/i40iw/i40iw_cm.c 		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
rcv_nxt          2745 drivers/infiniband/hw/i40iw/i40iw_cm.c 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
rcv_nxt          2752 drivers/infiniband/hw/i40iw/i40iw_cm.c 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
rcv_nxt          2759 drivers/infiniband/hw/i40iw/i40iw_cm.c 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
rcv_nxt          2980 drivers/infiniband/hw/i40iw/i40iw_cm.c 			cm_node->tcp_cntxt.rcv_nxt =
rcv_nxt          2982 drivers/infiniband/hw/i40iw/i40iw_cm.c 			loopback_remotenode->tcp_cntxt.rcv_nxt =
rcv_nxt          3320 drivers/infiniband/hw/i40iw/i40iw_cm.c 	tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
rcv_nxt          3325 drivers/infiniband/hw/i40iw/i40iw_cm.c 	tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
rcv_nxt           267 drivers/infiniband/hw/i40iw/i40iw_cm.h 	u32 rcv_nxt;
rcv_nxt          2889 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
rcv_nxt          1218 drivers/infiniband/hw/i40iw/i40iw_puda.c 	pfpdu->rcv_nxt = seqnum + fpdu_len;
rcv_nxt          1307 drivers/infiniband/hw/i40iw/i40iw_puda.c 			pfpdu->rcv_nxt = buf->seqnum + length;
rcv_nxt          1314 drivers/infiniband/hw/i40iw/i40iw_puda.c 		pfpdu->rcv_nxt = buf->seqnum;
rcv_nxt          1344 drivers/infiniband/hw/i40iw/i40iw_puda.c 		if (buf->seqnum != pfpdu->rcv_nxt) {
rcv_nxt          1401 drivers/infiniband/hw/i40iw/i40iw_puda.c 		pfpdu->rcv_nxt = fps;
rcv_nxt          1411 drivers/infiniband/hw/i40iw/i40iw_puda.c 	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
rcv_nxt           265 drivers/infiniband/hw/i40iw/i40iw_type.h 	u32 rcv_nxt;
rcv_nxt           719 drivers/infiniband/hw/i40iw/i40iw_type.h 	u32 rcv_nxt;
rcv_nxt          4098 drivers/net/ethernet/broadcom/cnic_defs.h 	u32 rcv_nxt;
rcv_nxt           357 drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h 	u32 rcv_nxt;
rcv_nxt           720 drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h 	__be32 rcv_nxt;
rcv_nxt           773 drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h 	__be32 rcv_nxt;
rcv_nxt           736 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h 	__be32 rcv_nxt;
rcv_nxt           809 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h 	__be32 rcv_nxt;
rcv_nxt           569 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h 		__be32 rcv_nxt;
rcv_nxt           265 drivers/net/ethernet/sun/sunvnet_common.c 	dr->rcv_nxt = 1;
rcv_nxt           269 drivers/net/ethernet/sun/sunvnet_common.c 	dr->rcv_nxt = 1;
rcv_nxt           651 drivers/net/ethernet/sun/sunvnet_common.c 	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
rcv_nxt           655 drivers/net/ethernet/sun/sunvnet_common.c 	if (unlikely(pkt->seq != dr->rcv_nxt)) {
rcv_nxt           657 drivers/net/ethernet/sun/sunvnet_common.c 		       pkt->seq, dr->rcv_nxt);
rcv_nxt           662 drivers/net/ethernet/sun/sunvnet_common.c 		dr->rcv_nxt++;
rcv_nxt           850 drivers/net/ethernet/sun/sunvnet_common.c 			pkt->seq = dr->rcv_nxt;
rcv_nxt           506 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
rcv_nxt           827 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
rcv_nxt           636 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
rcv_nxt           662 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
rcv_nxt           855 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
rcv_nxt          1214 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
rcv_nxt          1217 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 				csk->rcv_nxt);
rcv_nxt          1240 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
rcv_nxt          1476 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	csk->rcv_nxt = seq + pdu_len_ddp;
rcv_nxt           157 drivers/scsi/cxgbi/libcxgbi.h 	u32 rcv_nxt;
rcv_nxt           220 drivers/target/iscsi/cxgbit/cxgbit.h 	u32 rcv_nxt;
rcv_nxt          1449 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
rcv_nxt          1482 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		 csk->rcv_nxt, csk->snd_win, csk->emss);
rcv_nxt          1632 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	csk->rcv_nxt = rcv_isn;
rcv_nxt          1543 drivers/target/iscsi/cxgbit/cxgbit_target.c 	    (pdu_cb->seq != csk->rcv_nxt)) {
rcv_nxt          1545 drivers/target/iscsi/cxgbit/cxgbit_target.c 			csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
rcv_nxt          1550 drivers/target/iscsi/cxgbit/cxgbit_target.c 	csk->rcv_nxt += lro_cb->pdu_totallen;
rcv_nxt           127 include/linux/tcp.h 	u32				rcv_nxt; /* the ack # by SYNACK. For
rcv_nxt           165 include/linux/tcp.h  	u32	rcv_nxt;	/* What we want to receive next 	*/
rcv_nxt           592 include/net/tcp.h void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
rcv_nxt           727 include/net/tcp.h 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
rcv_nxt          2093 include/net/tcp.h 		   !before(tp->urg_seq, tp->rcv_nxt)) {
rcv_nxt          2095 include/net/tcp.h 		answ = tp->rcv_nxt - tp->copied_seq;
rcv_nxt          3082 include/uapi/linux/bpf.h 	__u32 rcv_nxt;		/* What we want to receive next		*/
rcv_nxt          3337 include/uapi/linux/bpf.h 	__u32 rcv_nxt;
rcv_nxt          5624 net/core/filter.c 	case offsetof(struct bpf_tcp_sock, rcv_nxt):
rcv_nxt          5625 net/core/filter.c 		BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
rcv_nxt          8236 net/core/filter.c 	case offsetof(struct bpf_sock_ops, rcv_nxt):
rcv_nxt          8237 net/core/filter.c 		SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
rcv_nxt           480 net/ipv4/tcp.c 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
rcv_nxt          1538 net/ipv4/tcp.c 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
rcv_nxt          1546 net/ipv4/tcp.c 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
rcv_nxt          1929 net/ipv4/tcp.c 	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
rcv_nxt          1932 net/ipv4/tcp.c 	inq = rcv_nxt - copied_seq;
rcv_nxt          1935 net/ipv4/tcp.c 		inq = tp->rcv_nxt - tp->copied_seq;
rcv_nxt          2036 net/ipv4/tcp.c 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
rcv_nxt          2051 net/ipv4/tcp.c 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
rcv_nxt          2602 net/ipv4/tcp.c 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
rcv_nxt          2719 net/ipv4/tcp.c 	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
rcv_nxt          2722 net/ipv4/tcp.c 	if (after(opt.rcv_wup, tp->rcv_nxt))
rcv_nxt          2958 net/ipv4/tcp.c 			WRITE_ONCE(tp->rcv_nxt, val);
rcv_nxt          3598 net/ipv4/tcp.c 			val = tp->rcv_nxt;
rcv_nxt            84 net/ipv4/tcp_dctcp.c 		ca->prior_rcv_nxt = tp->rcv_nxt;
rcv_nxt            35 net/ipv4/tcp_dctcp.h 	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
rcv_nxt            29 net/ipv4/tcp_diag.c 		r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
rcv_nxt           167 net/ipv4/tcp_fastopen.c 	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
rcv_nxt           190 net/ipv4/tcp_fastopen.c 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
rcv_nxt           277 net/ipv4/tcp_fastopen.c 	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
rcv_nxt           281 net/ipv4/tcp_fastopen.c 	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
rcv_nxt           282 net/ipv4/tcp_fastopen.c 	tp->rcv_wup = tp->rcv_nxt;
rcv_nxt           564 net/ipv4/tcp_input.c 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
rcv_nxt           572 net/ipv4/tcp_input.c 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
rcv_nxt          3368 net/ipv4/tcp_input.c 	u32 delta = seq - tp->rcv_nxt;
rcv_nxt          3372 net/ipv4/tcp_input.c 	WRITE_ONCE(tp->rcv_nxt, seq);
rcv_nxt          4068 net/ipv4/tcp_input.c 		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
rcv_nxt          4105 net/ipv4/tcp_input.c 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
rcv_nxt          4239 net/ipv4/tcp_input.c 		if (before(seq, tp->rcv_nxt))
rcv_nxt          4278 net/ipv4/tcp_input.c 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
rcv_nxt          4286 net/ipv4/tcp_input.c 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
rcv_nxt          4287 net/ipv4/tcp_input.c 				end_seq = tp->rcv_nxt;
rcv_nxt          4383 net/ipv4/tcp_input.c 		if (!before(tp->rcv_nxt, sp->start_seq)) {
rcv_nxt          4387 net/ipv4/tcp_input.c 			WARN_ON(before(tp->rcv_nxt, sp->end_seq));
rcv_nxt          4481 net/ipv4/tcp_input.c 	__u32 dsack_high = tp->rcv_nxt;
rcv_nxt          4489 net/ipv4/tcp_input.c 		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
rcv_nxt          4501 net/ipv4/tcp_input.c 		if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
rcv_nxt          4732 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
rcv_nxt          4752 net/ipv4/tcp_input.c 	int avail = tp->rcv_nxt - tp->copied_seq;
rcv_nxt          4782 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
rcv_nxt          4825 net/ipv4/tcp_input.c 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
rcv_nxt          4840 net/ipv4/tcp_input.c 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
rcv_nxt          4843 net/ipv4/tcp_input.c 	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
rcv_nxt          4845 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
rcv_nxt          5138 net/ipv4/tcp_input.c 			     tp->copied_seq, tp->rcv_nxt);
rcv_nxt          5236 net/ipv4/tcp_input.c 	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
rcv_nxt          5242 net/ipv4/tcp_input.c 	    (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
rcv_nxt          5262 net/ipv4/tcp_input.c 	if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
rcv_nxt          5263 net/ipv4/tcp_input.c 		tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
rcv_nxt          5331 net/ipv4/tcp_input.c 	if (before(ptr, tp->rcv_nxt))
rcv_nxt          5357 net/ipv4/tcp_input.c 	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
rcv_nxt          5411 net/ipv4/tcp_input.c 	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
rcv_nxt          5472 net/ipv4/tcp_input.c 		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
rcv_nxt          5589 net/ipv4/tcp_input.c 	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
rcv_nxt          5624 net/ipv4/tcp_input.c 				    tp->rcv_nxt == tp->rcv_wup)
rcv_nxt          5659 net/ipv4/tcp_input.c 			    tp->rcv_nxt == tp->rcv_wup)
rcv_nxt          5943 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
rcv_nxt          5972 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
rcv_nxt          6046 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
rcv_nxt          6047 net/ipv4/tcp_input.c 		WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
rcv_nxt          6231 net/ipv4/tcp_input.c 			WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
rcv_nxt          6287 net/ipv4/tcp_input.c 		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
rcv_nxt          6338 net/ipv4/tcp_input.c 		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
rcv_nxt          6349 net/ipv4/tcp_input.c 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
rcv_nxt          6440 net/ipv4/tcp_input.c 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
rcv_nxt           922 net/ipv4/tcp_ipv4.c 			tcp_rsk(req)->rcv_nxt,
rcv_nxt          2462 net/ipv4/tcp_ipv4.c 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
rcv_nxt           271 net/ipv4/tcp_minisocks.c 		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
rcv_nxt           482 net/ipv4/tcp_minisocks.c 	WRITE_ONCE(newtp->rcv_nxt, seq);
rcv_nxt           710 net/ipv4/tcp_minisocks.c 					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
rcv_nxt           724 net/ipv4/tcp_minisocks.c 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
rcv_nxt           182 net/ipv4/tcp_output.c 				      u32 rcv_nxt)
rcv_nxt           194 net/ipv4/tcp_output.c 	if (unlikely(rcv_nxt != tp->rcv_nxt))
rcv_nxt           280 net/ipv4/tcp_output.c 	tp->rcv_wup = tp->rcv_nxt;
rcv_nxt          1018 net/ipv4/tcp_output.c 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
rcv_nxt          1111 net/ipv4/tcp_output.c 	th->ack_seq		= htonl(rcv_nxt);
rcv_nxt          1152 net/ipv4/tcp_output.c 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
rcv_nxt          1194 net/ipv4/tcp_output.c 				  tcp_sk(sk)->rcv_nxt);
rcv_nxt          3345 net/ipv4/tcp_output.c 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
rcv_nxt          3453 net/ipv4/tcp_output.c 		tp->rcv_nxt = 0;
rcv_nxt          3456 net/ipv4/tcp_output.c 	tp->rcv_wup = tp->rcv_nxt;
rcv_nxt          3457 net/ipv4/tcp_output.c 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
rcv_nxt          3684 net/ipv4/tcp_output.c void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
rcv_nxt          3717 net/ipv4/tcp_output.c 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
rcv_nxt          3723 net/ipv4/tcp_output.c 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
rcv_nxt          3763 net/ipv4/tcp_output.c 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
rcv_nxt          1053 net/ipv6/tcp_ipv6.c 			tcp_rsk(req)->rcv_nxt,
rcv_nxt          1431 net/ipv6/tcp_ipv6.c 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
rcv_nxt          1899 net/ipv6/tcp_ipv6.c 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
rcv_nxt           169 net/tipc/link.c 	u16 rcv_nxt;
rcv_nxt           383 net/tipc/link.c 	if (more(l->snd_nxt, l->rcv_nxt))
rcv_nxt           384 net/tipc/link.c 		gap = l->snd_nxt - l->rcv_nxt;
rcv_nxt           386 net/tipc/link.c 		gap = buf_seqno(skb) - l->rcv_nxt;
rcv_nxt           402 net/tipc/link.c 	return l->rcv_nxt;
rcv_nxt           918 net/tipc/link.c 	l->rcv_nxt = 1;
rcv_nxt           946 net/tipc/link.c 	u16 ack = l->rcv_nxt - 1;
rcv_nxt           948 net/tipc/link.c 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
rcv_nxt          1031 net/tipc/link.c 	u16 ack = l->rcv_nxt - 1;
rcv_nxt          1032 net/tipc/link.c 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
rcv_nxt          1129 net/tipc/link.c 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
rcv_nxt          1130 net/tipc/link.c 	u16 ack = l->rcv_nxt - 1;
rcv_nxt          1406 net/tipc/link.c 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
rcv_nxt          1407 net/tipc/link.c 	u16 ack = l->rcv_nxt - 1;
rcv_nxt          1470 net/tipc/link.c 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
rcv_nxt          1475 net/tipc/link.c 		l->snd_nxt = l->rcv_nxt;
rcv_nxt          1538 net/tipc/link.c 	u16 seqno, rcv_nxt, win_lim;
rcv_nxt          1551 net/tipc/link.c 		rcv_nxt = l->rcv_nxt;
rcv_nxt          1552 net/tipc/link.c 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
rcv_nxt          1561 net/tipc/link.c 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
rcv_nxt          1574 net/tipc/link.c 		if (unlikely(seqno != rcv_nxt)) {
rcv_nxt          1581 net/tipc/link.c 		l->rcv_nxt++;
rcv_nxt          1592 net/tipc/link.c 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
rcv_nxt          1623 net/tipc/link.c 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
rcv_nxt          1637 net/tipc/link.c 	msg_set_ack(hdr, l->rcv_nxt - 1);
rcv_nxt          1638 net/tipc/link.c 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
rcv_nxt          1845 net/tipc/link.c 		tnl->drop_point = l->rcv_nxt;
rcv_nxt          1950 net/tipc/link.c 	u16 rcv_nxt = l->rcv_nxt;
rcv_nxt          2066 net/tipc/link.c 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
rcv_nxt          2067 net/tipc/link.c 			rcvgap = peers_snd_nxt - l->rcv_nxt;
rcv_nxt          2096 net/tipc/link.c 	u16 ack = l->rcv_nxt - 1;
rcv_nxt          2143 net/tipc/link.c 		l->rcv_nxt = peers_snd_nxt;
rcv_nxt          2156 net/tipc/link.c 		l->rcv_nxt = peers_snd_nxt;
rcv_nxt          2186 net/tipc/link.c 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
rcv_nxt          2201 net/tipc/link.c 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
rcv_nxt          2215 net/tipc/link.c 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
rcv_nxt          2291 net/tipc/link.c 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
rcv_nxt          2327 net/tipc/link.c 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
rcv_nxt          2697 net/tipc/link.c 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
rcv_nxt          1655 net/tipc/node.c 	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
rcv_nxt          1668 net/tipc/node.c 	rcv_nxt = tipc_link_rcv_nxt(l);
rcv_nxt          1708 net/tipc/node.c 	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
rcv_nxt          1738 net/tipc/node.c 		if (!more(rcv_nxt, n->sync_point))
rcv_nxt          3082 tools/include/uapi/linux/bpf.h 	__u32 rcv_nxt;		/* What we want to receive next		*/
rcv_nxt          3337 tools/include/uapi/linux/bpf.h 	__u32 rcv_nxt;
rcv_nxt           113 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	dst->rcv_nxt = src->rcv_nxt;
rcv_nxt           114 tools/testing/selftests/bpf/test_sock_fields.c 	       tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
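The most frequent pattern among these call sites is computing the amount of in-order, received-but-not-yet-read data as `rcv_nxt - copied_seq` (see net/ipv4/tcp.c, tcp_diag.c, and tcp_input.c above). Below is a minimal user-space sketch of that arithmetic, not kernel code: the names `toy_sock` and `toy_inq` are hypothetical, and the only assumption is that both counters are wrapping 32-bit sequence values, as they are in `struct tcp_sock`.

```c
/*
 * Illustrative sketch (not kernel code) of the tp->rcv_nxt - tp->copied_seq
 * pattern seen in the listing above.  Sequence numbers are u32 and wrap, so
 * the subtraction is done in unsigned 32-bit arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_sock {                 /* hypothetical stand-in for struct tcp_sock */
	uint32_t rcv_nxt;         /* next sequence number expected from the peer */
	uint32_t copied_seq;      /* first byte not yet copied to user space     */
};

/*
 * Bytes received in order but not yet read -- roughly what SIOCINQ reports
 * (the kernel additionally adjusts for an unread urgent byte).
 */
static uint32_t toy_inq(const struct toy_sock *tp)
{
	return tp->rcv_nxt - tp->copied_seq;   /* wrap-safe in u32 */
}

int main(void)
{
	struct toy_sock tp = { .rcv_nxt = 0x10u, .copied_seq = 0xfffffff0u };

	/* 0x10 - 0xfffffff0 wraps around to 0x20: 32 unread bytes. */
	printf("inq = %u\n", (unsigned)toy_inq(&tp));
	return 0;
}
```

The same wrap-safe subtraction underlies the `before()`/`after()` comparisons in tcp_input.c above: sequence positions are only ever compared through modular 32-bit differences, never with plain `<` or `>`.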