sk_rcvbuf         581 drivers/block/drbd/drbd_receiver.c 		sock->sk->sk_rcvbuf = rcv;
sk_rcvbuf        3915 fs/cifs/connect.c 		if (socket->sk->sk_rcvbuf < (140 * 1024))
sk_rcvbuf        3916 fs/cifs/connect.c 			socket->sk->sk_rcvbuf = 140 * 1024;
sk_rcvbuf        3930 fs/cifs/connect.c 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
sk_rcvbuf         210 include/crypto/if_alg.h 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
sk_rcvbuf         190 include/net/dn_nsp.h         return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
sk_rcvbuf         222 include/net/llc_c_ev.h 	       (unsigned int)sk->sk_rcvbuf;
sk_rcvbuf         392 include/net/sock.h 	int			sk_rcvbuf;
sk_rcvbuf         689 include/net/tcp.h 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
sk_rcvbuf        1394 include/net/tcp.h 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
sk_rcvbuf        1401 include/net/tcp.h 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
sk_rcvbuf        1411 include/net/tcp.h 	int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf          79 include/trace/events/sock.h 		__field(int, sk_rcvbuf)
sk_rcvbuf          85 include/trace/events/sock.h 		__entry->sk_rcvbuf  = READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf          89 include/trace/events/sock.h 		__entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
sk_rcvbuf          18 net/atm/atm_misc.c 	if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
sk_rcvbuf          33 net/atm/atm_misc.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
sk_rcvbuf         201 net/atm/proc.c 		   sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
sk_rcvbuf         250 net/ax25/af_ax25.c 		    atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
sk_rcvbuf         918 net/ax25/af_ax25.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk_rcvbuf         125 net/ax25/ax25_ds_timer.c 			    (sk->sk_rcvbuf >> 1) &&
sk_rcvbuf         264 net/ax25/ax25_in.c 				    sk->sk_rcvbuf) {
sk_rcvbuf          63 net/ax25/ax25_std_timer.c 			    (sk->sk_rcvbuf >> 1) &&
sk_rcvbuf        1036 net/bluetooth/l2cap_sock.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
sk_rcvbuf          60 net/bluetooth/rfcomm/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk_rcvbuf         302 net/bluetooth/rfcomm/sock.c 	sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk_rcvbuf         643 net/bluetooth/rfcomm/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
sk_rcvbuf         109 net/caif/caif_socket.c 	return cf_sk->sk.sk_rcvbuf / 4;
sk_rcvbuf         133 net/caif/caif_socket.c 		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
sk_rcvbuf        4257 net/core/filter.c 			WRITE_ONCE(sk->sk_rcvbuf,
sk_rcvbuf        4418 net/core/skbuff.c 	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
sk_rcvbuf         704 net/core/skmsg.c 		    sk_other->sk_rcvbuf) {
sk_rcvbuf         725 net/core/skmsg.c 		     sk_other->sk_rcvbuf)) {
sk_rcvbuf         456 net/core/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
sk_rcvbuf         508 net/core/sock.c 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
sk_rcvbuf         525 net/core/sock.c 	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
sk_rcvbuf         835 net/core/sock.c 		WRITE_ONCE(sk->sk_rcvbuf,
sk_rcvbuf        1259 net/core/sock.c 		v.val = sk->sk_rcvbuf;
sk_rcvbuf        2865 net/core/sock.c 	sk->sk_rcvbuf		=	sysctl_rmem_default;
sk_rcvbuf        3214 net/core/sock.c 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf         483 net/decnet/af_decnet.c 	sk->sk_rcvbuf	   = sysctl_decnet_rmem[1];
sk_rcvbuf         586 net/decnet/dn_nsp_in.c 	    (unsigned int)sk->sk_rcvbuf) {
sk_rcvbuf         454 net/ipv4/tcp.c 	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_rcvbuf        1706 net/ipv4/tcp.c 		cap = sk->sk_rcvbuf >> 1;
sk_rcvbuf        1719 net/ipv4/tcp.c 	if (val > sk->sk_rcvbuf) {
sk_rcvbuf        1720 net/ipv4/tcp.c 		WRITE_ONCE(sk->sk_rcvbuf, val);
sk_rcvbuf         483 net/ipv4/tcp_input.c 	if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
sk_rcvbuf         487 net/ipv4/tcp_input.c 		WRITE_ONCE(sk->sk_rcvbuf,
sk_rcvbuf         491 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
sk_rcvbuf         652 net/ipv4/tcp_input.c 		if (rcvbuf > sk->sk_rcvbuf) {
sk_rcvbuf         653 net/ipv4/tcp_input.c 			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
sk_rcvbuf        4531 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk_rcvbuf        5049 net/ipv4/tcp_input.c 				if (sum_tiny > sk->sk_rcvbuf >> 3)
sk_rcvbuf        5084 net/ipv4/tcp_input.c 	goal = sk->sk_rcvbuf >> 3;
sk_rcvbuf        5093 net/ipv4/tcp_input.c 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
sk_rcvbuf        5096 net/ipv4/tcp_input.c 			goal = sk->sk_rcvbuf >> 3;
sk_rcvbuf        5125 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk_rcvbuf        5130 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk_rcvbuf        5141 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk_rcvbuf        5149 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk_rcvbuf        1650 net/ipv4/tcp_ipv4.c 	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
sk_rcvbuf        1371 net/ipv4/udp.c 		if (size < (sk->sk_rcvbuf >> 2) &&
sk_rcvbuf        1458 net/ipv4/udp.c 	if (rmem > sk->sk_rcvbuf)
sk_rcvbuf        1467 net/ipv4/udp.c 	if (rmem > (sk->sk_rcvbuf >> 1)) {
sk_rcvbuf        1479 net/ipv4/udp.c 	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
sk_rcvbuf        1862 net/iucv/af_iucv.c 	if (len > sk->sk_rcvbuf)
sk_rcvbuf         192 net/kcm/kcmsock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk_rcvbuf          76 net/key/af_key.c 	if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
sk_rcvbuf         200 net/key/af_key.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
sk_rcvbuf        3716 net/key/af_key.c 	    3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk_rcvbuf         816 net/llc/llc_conn.c 		if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk_rcvbuf        1292 net/netfilter/ipvs/ip_vs_sync.c 		sk->sk_rcvbuf = val * 2;
sk_rcvbuf        1210 net/netlink/af_netlink.c 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk_rcvbuf        1224 net/netlink/af_netlink.c 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk_rcvbuf        1379 net/netlink/af_netlink.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
sk_rcvbuf        1383 net/netlink/af_netlink.c 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
sk_rcvbuf        2000 net/netlink/af_netlink.c 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
sk_rcvbuf        2206 net/netlink/af_netlink.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk_rcvbuf         465 net/netrom/af_netrom.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk_rcvbuf         136 net/netrom/nr_timer.c 		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
sk_rcvbuf        1235 net/packet/af_packet.c 		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf        2090 net/packet/af_packet.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk_rcvbuf        2242 net/packet/af_packet.c 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
sk_rcvbuf         686 net/rds/rds.h  	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
sk_rcvbuf         556 net/rose/af_rose.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk_rcvbuf         182 net/rose/rose_in.c 			    (sk->sk_rcvbuf >> 1))
sk_rcvbuf         141 net/rose/rose_timer.c 		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
sk_rcvbuf         350 net/sched/em_meta.c 	dst->value = sk->sk_rcvbuf;
sk_rcvbuf         157 net/sctp/associola.c 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
sk_rcvbuf         160 net/sctp/associola.c 		asoc->rwnd = sk->sk_rcvbuf/2;
sk_rcvbuf        1457 net/sctp/associola.c 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
sk_rcvbuf        1547 net/sctp/associola.c 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
sk_rcvbuf         173 net/sctp/diag.c 		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
sk_rcvbuf         325 net/sctp/input.c 			if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk_rcvbuf         340 net/sctp/input.c 			if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk_rcvbuf         367 net/sctp/input.c 	ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
sk_rcvbuf         277 net/sctp/proc.c 		sk->sk_rcvbuf);
sk_rcvbuf        3549 net/sctp/socket.c 	if (val > (sk->sk_rcvbuf >> 1))
sk_rcvbuf        9308 net/sctp/socket.c 	newsk->sk_rcvbuf = sk->sk_rcvbuf;
sk_rcvbuf        9374 net/sctp/socket.c 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
sk_rcvbuf         639 net/sctp/ulpevent.c 	if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
sk_rcvbuf         291 net/smc/af_smc.c 	nsk->sk_rcvbuf = osk->sk_rcvbuf;
sk_rcvbuf        1384 net/smc/af_smc.c 		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sk_rcvbuf        1999 net/smc/af_smc.c 	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
sk_rcvbuf         847 net/smc/smc_core.c 		sk_buf_size = smc->sk.sk_rcvbuf / 2;
sk_rcvbuf         896 net/smc/smc_core.c 		smc->sk.sk_rcvbuf = bufsize * 2;
sk_rcvbuf         344 net/strparser/strparser.c 			   strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
sk_rcvbuf         362 net/sunrpc/svcsock.c 	sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
sk_rcvbuf        1648 net/sunrpc/xprtsock.c 		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
sk_rcvbuf         196 net/tipc/group.c void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
sk_rcvbuf         208 net/tipc/group.c 	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
sk_rcvbuf         850 net/tipc/group.c 			   int *sk_rcvbuf,
sk_rcvbuf         916 net/tipc/group.c 	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
sk_rcvbuf          60 net/tipc/group.h 			   int *sk_rcvbuf, struct tipc_msg *hdr,
sk_rcvbuf         472 net/tipc/socket.c 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk_rcvbuf        1680 net/tipc/socket.c 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
sk_rcvbuf        2004 net/tipc/socket.c 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
sk_rcvbuf        2128 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf        2131 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
sk_rcvbuf        2134 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf);
sk_rcvbuf        2917 net/tipc/socket.c 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
sk_rcvbuf        3799 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
sk_rcvbuf         359 net/vmw_vsock/hyperv_transport.c 		rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
sk_rcvbuf         433 net/vmw_vsock/hyperv_transport.c 	sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
sk_rcvbuf         607 net/x25/af_x25.c 	sk->sk_rcvbuf      = osk->sk_rcvbuf;
sk_rcvbuf          58 net/x25/x25_dev.c 			queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
sk_rcvbuf         292 net/x25/x25_in.c 				    (sk->sk_rcvbuf >> 1))
sk_rcvbuf         370 net/x25/x25_subr.c 	if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
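
The hits above fall into two recurring patterns: sites that write sk_rcvbuf (sock_setsockopt in net/core/sock.c, TCP receive-buffer autotuning in net/ipv4/tcp_input.c) and sites that compare sk_rmem_alloc against it before accepting more data. As a hedged illustration of the first group only, the small userspace sketch below is not part of the listing; it shows how the SO_RCVBUF socket option reaches sk->sk_rcvbuf via the WRITE_ONCE in net/core/sock.c, and why the value read back is roughly double what was requested (the kernel doubles it to account for bookkeeping overhead, subject to the net.core.rmem_max clamp).

	/*
	 * Illustrative userspace sketch, not taken from the cross-referenced
	 * kernel code: SO_RCVBUF is the userspace-visible knob behind the
	 * kernel's sk->sk_rcvbuf field.
	 */
	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int req = 64 * 1024;            /* request a 64 KiB receive buffer */
		int got = 0;
		socklen_t len = sizeof(got);

		if (fd < 0)
			return 1;

		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
		getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);

		/*
		 * On Linux this typically reports about 131072: sock_setsockopt
		 * stored twice the requested size in sk->sk_rcvbuf (see the
		 * WRITE_ONCE hit in net/core/sock.c above), clamped by the
		 * net.core.rmem_max sysctl.
		 */
		printf("requested %d, kernel reports %d\n", req, got);

		close(fd);
		return 0;
	}

The second group of hits (for example the drops in net/core/sock.c, net/packet/af_packet.c, and the half-buffer thresholds in the AX.25, ROSE, and X.25 code) treats sk_rcvbuf as the budget that atomic_read(&sk->sk_rmem_alloc) is checked against before queuing or acknowledging further receive data.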