nsk               826 include/net/sock.h static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
nsk               828 include/net/sock.h 	nsk->sk_flags = osk->sk_flags;
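
In this header helper the naming convention is nsk = "new socket", osk = "old socket": the whole sk_flags word of the old (typically listening) socket is copied onto the new child. A minimal sketch of how a protocol's clone/accept path might use it; the wrapper function and the parent/child names are hypothetical, only sock_copy_flags() and struct sock come from the lines above.

	/* Hypothetical clone helper; only sock_copy_flags() is from the
	 * listing above, the surrounding function is illustrative. */
	static void example_init_child(struct sock *child, struct sock *parent)
	{
		/* child inherits every SOCK_* flag bit of the listener */
		sock_copy_flags(child, parent);
	}
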
nsk               205 kernel/bpf/reuseport_array.c 			     const struct sock *nsk,
nsk               216 kernel/bpf/reuseport_array.c 	if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
nsk               219 kernel/bpf/reuseport_array.c 	if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
nsk               222 kernel/bpf/reuseport_array.c 	if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
nsk               233 kernel/bpf/reuseport_array.c 	if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
nsk               237 kernel/bpf/reuseport_array.c 	if (READ_ONCE(nsk->sk_user_data))
nsk               252 kernel/bpf/reuseport_array.c 	struct sock *free_osk = NULL, *osk, *nsk;
nsk               278 kernel/bpf/reuseport_array.c 	nsk = socket->sk;
nsk               279 kernel/bpf/reuseport_array.c 	if (!nsk) {
nsk               285 kernel/bpf/reuseport_array.c 	err = reuseport_array_update_check(array, nsk,
nsk               287 kernel/bpf/reuseport_array.c 					   rcu_access_pointer(nsk->sk_reuseport_cb),
nsk               298 kernel/bpf/reuseport_array.c 	write_lock_bh(&nsk->sk_callback_lock);
nsk               302 kernel/bpf/reuseport_array.c 	reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
nsk               304 kernel/bpf/reuseport_array.c 	err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
nsk               313 kernel/bpf/reuseport_array.c 	WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
nsk               314 kernel/bpf/reuseport_array.c 	rcu_assign_pointer(array->ptrs[index], nsk);
nsk               319 kernel/bpf/reuseport_array.c 	write_unlock_bh(&nsk->sk_callback_lock);
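
The checks at lines 216-237 require the socket being inserted (nsk) to be a hashed TCP or UDP socket, IPv4 or IPv6, with SO_REUSEPORT enabled and an unused sk_user_data; the update itself (lines 298-319) re-validates everything under nsk->sk_callback_lock before publishing the socket into the array via RCU. From user space such a map is fed with socket file descriptors. A hedged sketch, assuming libbpf and an already created BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map whose value_size is sizeof(__u64); the function and variable names are illustrative.

	/* Sketch: insert a bound SO_REUSEPORT UDP socket into a reuseport
	 * sockarray slot.  map_fd is assumed to come from elsewhere. */
	#include <bpf/bpf.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	static int add_to_reuseport_array(int map_fd, __u32 index)
	{
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(5000),
		};
		int one = 1;
		__u64 fd_val;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		bind(fd, (struct sockaddr *)&addr, sizeof(addr));

		/* The kernel side (reuseport_array_update_elem) rejects the
		 * update unless the socket is hashed and has SO_REUSEPORT. */
		fd_val = fd;
		return bpf_map_update_elem(map_fd, &index, &fd_val, BPF_ANY);
	}
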
nsk               308 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk, *nsk;
nsk               326 net/bluetooth/l2cap_sock.c 		nsk = bt_accept_dequeue(sk, newsock);
nsk               327 net/bluetooth/l2cap_sock.c 		if (nsk)
nsk               353 net/bluetooth/l2cap_sock.c 	BT_DBG("new socket %p", nsk);
nsk               480 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk, *nsk;
nsk               503 net/bluetooth/rfcomm/sock.c 		nsk = bt_accept_dequeue(sk, newsock);
nsk               504 net/bluetooth/rfcomm/sock.c 		if (nsk)
nsk               530 net/bluetooth/rfcomm/sock.c 	BT_DBG("new socket %p", nsk);
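
Both Bluetooth accept() implementations share the same shape: sleep on the listening socket's wait queue and retry bt_accept_dequeue() until it hands back a child nsk, or the timeout/signal aborts the wait. A condensed sketch of that loop, using the calls visible above plus standard kernel wait primitives; state checks and error-code selection are trimmed.

	/* Condensed sketch of the accept loop shared by l2cap_sock_accept()
	 * and rfcomm_sock_accept(); details omitted. */
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	struct sock *nsk;

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	for (;;) {
		nsk = bt_accept_dequeue(sk, newsock);	/* child ready? */
		if (nsk)
			break;
		if (!timeo || signal_pending(current)) {
			nsk = NULL;		/* caller returns -EAGAIN etc. */
			break;
		}
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
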
nsk              1575 net/core/sock.c static void sock_copy(struct sock *nsk, const struct sock *osk)
nsk              1578 net/core/sock.c 	void *sptr = nsk->sk_security;
nsk              1580 net/core/sock.c 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
nsk              1582 net/core/sock.c 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
nsk              1586 net/core/sock.c 	nsk->sk_security = sptr;
nsk              1587 net/core/sock.c 	security_sk_clone(osk, nsk);
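
sock_copy() clones the parent socket into the child with two memcpy()s so that the region between sk_dontcopy_begin and sk_dontcopy_end (lookup keys, refcount, lock state) is left untouched in the destination; the child's sk_security pointer is saved beforehand, restored afterwards, and re-initialised through security_sk_clone(). The "copy around a hole" idiom can be shown with a small self-contained user-space demo; the struct and field names below are invented for illustration, not the kernel's.

	/* Stand-alone demo of the copy-around-a-hole idiom used by sock_copy(). */
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	struct demo_sock {
		int	cfg_a;			/* copied                */
		char	dontcopy_begin[0];	/* start of the hole     */
		int	refcount;		/* NOT copied            */
		char	dontcopy_end[0];	/* end of the hole       */
		int	cfg_b;			/* copied                */
	};

	static void demo_copy(struct demo_sock *nsk, const struct demo_sock *osk)
	{
		memcpy(nsk, osk, offsetof(struct demo_sock, dontcopy_begin));
		memcpy(&nsk->dontcopy_end, &osk->dontcopy_end,
		       sizeof(*osk) - offsetof(struct demo_sock, dontcopy_end));
	}

	int main(void)
	{
		struct demo_sock parent = { .cfg_a = 1, .refcount = 5, .cfg_b = 2 };
		struct demo_sock child  = { .refcount = 1 };

		demo_copy(&child, &parent);
		/* prints: a=1 refcount=1 b=2 -- the hole kept the child's refcount */
		printf("a=%d refcount=%d b=%d\n",
		       child.cfg_a, child.refcount, child.cfg_b);
		return 0;
	}
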
nsk               833 net/dccp/ipv4.c 		struct sock *nsk;
nsk               842 net/dccp/ipv4.c 		nsk = dccp_check_req(sk, skb, req);
nsk               843 net/dccp/ipv4.c 		if (!nsk) {
nsk               847 net/dccp/ipv4.c 		if (nsk == sk) {
nsk               849 net/dccp/ipv4.c 		} else if (dccp_child_process(sk, nsk, skb)) {
nsk               735 net/dccp/ipv6.c 		struct sock *nsk;
nsk               744 net/dccp/ipv6.c 		nsk = dccp_check_req(sk, skb, req);
nsk               745 net/dccp/ipv6.c 		if (!nsk) {
nsk               749 net/dccp/ipv6.c 		if (nsk == sk) {
nsk               751 net/dccp/ipv6.c 		} else if (dccp_child_process(sk, nsk, skb)) {
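
Both DCCP receive paths resolve a pending request socket the same way: dccp_check_req() either fails (nsk == NULL, drop), completes on the listener itself (nsk == sk), or returns a freshly created child that the packet is then fed to via dccp_child_process(). A condensed sketch of the IPv4 dispatch (the IPv6 side at lines 744-751 is the mirror image); the goto labels belong to the surrounding dccp_v4_rcv() and are kept only for orientation.

	/* Condensed from the dccp_v4_rcv() request-socket branch above. */
	nsk = dccp_check_req(sk, skb, req);
	if (!nsk) {				/* request failed validation */
		reqsk_put(req);
		goto discard_and_relse;
	}
	if (nsk == sk) {			/* still the listener: keep going */
		reqsk_put(req);
	} else if (dccp_child_process(sk, nsk, skb)) {
		dccp_v4_ctl_send_reset(sk, skb);	/* child refused the skb */
		goto discard_and_relse;
	} else {
		sock_put(sk);
		return 0;			/* skb consumed by the child */
	}
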
nsk              1572 net/ipv4/tcp_ipv4.c 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
nsk              1574 net/ipv4/tcp_ipv4.c 		if (!nsk)
nsk              1576 net/ipv4/tcp_ipv4.c 		if (nsk != sk) {
nsk              1577 net/ipv4/tcp_ipv4.c 			if (tcp_child_process(sk, nsk, skb)) {
nsk              1578 net/ipv4/tcp_ipv4.c 				rsk = nsk;
nsk              1858 net/ipv4/tcp_ipv4.c 		struct sock *nsk;
nsk              1879 net/ipv4/tcp_ipv4.c 		nsk = NULL;
nsk              1884 net/ipv4/tcp_ipv4.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
nsk              1886 net/ipv4/tcp_ipv4.c 		if (!nsk) {
nsk              1900 net/ipv4/tcp_ipv4.c 		if (nsk == sk) {
nsk              1903 net/ipv4/tcp_ipv4.c 		} else if (tcp_child_process(sk, nsk, skb)) {
nsk              1904 net/ipv4/tcp_ipv4.c 			tcp_v4_send_reset(nsk, skb);
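
TCP uses nsk in two places here. In tcp_v4_do_rcv() (lines 1572-1578) a listener consults tcp_v4_cookie_check(): a NULL result means the syncookie was invalid and the segment is dropped, nsk == sk means no cookie was involved, and any other socket is a newly minted child that gets the segment through tcp_child_process(), with a failure making nsk the target of the reset. In tcp_v4_rcv() (lines 1858-1904) the same three-way outcome comes from tcp_check_req() for a socket found in TCP_NEW_SYN_RECV state. A condensed sketch of the first path; rsk and the goto labels belong to the surrounding function.

	/* Condensed from the tcp_v4_do_rcv() listener branch shown above. */
	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)			/* bad syncookie: drop */
			goto discard;
		if (nsk != sk) {		/* cookie produced a child */
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;	/* reset goes to the child */
				goto reset;
			}
			return 0;
		}
		/* nsk == sk: fall through, keep processing on the listener */
	}
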
nsk              1388 net/ipv6/tcp_ipv6.c 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
nsk              1390 net/ipv6/tcp_ipv6.c 		if (!nsk)
nsk              1393 net/ipv6/tcp_ipv6.c 		if (nsk != sk) {
nsk              1394 net/ipv6/tcp_ipv6.c 			if (tcp_child_process(sk, nsk, skb))
nsk              1528 net/ipv6/tcp_ipv6.c 		struct sock *nsk;
nsk              1546 net/ipv6/tcp_ipv6.c 		nsk = NULL;
nsk              1551 net/ipv6/tcp_ipv6.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
nsk              1553 net/ipv6/tcp_ipv6.c 		if (!nsk) {
nsk              1567 net/ipv6/tcp_ipv6.c 		if (nsk == sk) {
nsk              1570 net/ipv6/tcp_ipv6.c 		} else if (tcp_child_process(sk, nsk, skb)) {
nsk              1571 net/ipv6/tcp_ipv6.c 			tcp_v6_send_reset(nsk, skb);
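
The IPv6 side (lines 1388-1394 and 1528-1571) mirrors IPv4 exactly, down to sending the reset to the child with tcp_v6_send_reset(nsk, skb) when tcp_child_process() rejects the segment. What tcp_child_process() does with the new socket is roughly the following; this is a simplified sketch from memory of net/ipv4/tcp_minisocks.c, with NAPI-id and accounting details omitted, not a verbatim copy.

	/* Simplified sketch of tcp_child_process(parent, child, skb). */
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		/* child's lock is free: run the state machine directly */
		ret = tcp_rcv_state_process(child, skb);
		/* wake the listener so accept() can pick the child up */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* child already owned by user context: defer via backlog */
		__sk_add_backlog(child, skb);
	}
	bh_unlock_sock(child);
	sock_put(child);
	return ret;	/* non-zero makes the caller send a reset to nsk */
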
nsk               963 net/iucv/af_iucv.c 	struct sock *sk = sock->sk, *nsk;
nsk               978 net/iucv/af_iucv.c 	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
nsk              1742 net/iucv/af_iucv.c 	struct sock *sk, *nsk;
nsk              1787 net/iucv/af_iucv.c 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
nsk              1788 net/iucv/af_iucv.c 	if (!nsk) {
nsk              1794 net/iucv/af_iucv.c 	niucv = iucv_sk(nsk);
nsk              1795 net/iucv/af_iucv.c 	iucv_sock_init(nsk, sk);
nsk              1797 net/iucv/af_iucv.c 	nsk->sk_allocation |= GFP_DMA;
nsk              1815 net/iucv/af_iucv.c 	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
nsk              1817 net/iucv/af_iucv.c 		iucv_sever_path(nsk, 1);
nsk              1818 net/iucv/af_iucv.c 		iucv_sock_kill(nsk);
nsk              1822 net/iucv/af_iucv.c 	iucv_accept_enqueue(sk, nsk);
nsk              1825 net/iucv/af_iucv.c 	nsk->sk_state = IUCV_CONNECTED;
nsk              1980 net/iucv/af_iucv.c 	struct sock *nsk;
nsk              1993 net/iucv/af_iucv.c 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
nsk              1997 net/iucv/af_iucv.c 	    !nsk) {
nsk              2002 net/iucv/af_iucv.c 		iucv_sock_kill(nsk);
nsk              2007 net/iucv/af_iucv.c 	niucv = iucv_sk(nsk);
nsk              2008 net/iucv/af_iucv.c 	iucv_sock_init(nsk, sk);
nsk              2019 net/iucv/af_iucv.c 	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
nsk              2028 net/iucv/af_iucv.c 		iucv_accept_enqueue(sk, nsk);
nsk              2029 net/iucv/af_iucv.c 		nsk->sk_state = IUCV_CONNECTED;
nsk              2032 net/iucv/af_iucv.c 		iucv_sock_kill(nsk);
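
AF_IUCV creates the child socket inside its connection callbacks rather than in a clone helper. Both the classic IUCV path (lines 1742-1825) and the HiperSockets transport (lines 1980-2032) follow the same recipe: allocate nsk with iucv_sock_alloc(), initialise it from the listener with iucv_sock_init(), accept or acknowledge the incoming path, queue the child on the listener with iucv_accept_enqueue(), and mark it IUCV_CONNECTED; on failure the half-built child is torn down with iucv_sock_kill(). A skeleton condensed from the listed lines; locking, window negotiation and user-data handling are omitted.

	/* Skeleton of the listener-side callback, condensed from af_iucv.c. */
	struct sock *nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);

	if (!nsk || sk->sk_state != IUCV_LISTEN || sk_acceptq_is_full(sk)) {
		if (nsk)
			iucv_sock_kill(nsk);	/* drop the half-built child */
		/* ...sever or refuse the incoming path... */
		return;
	}

	iucv_sock_init(nsk, sk);		/* inherit listener settings */
	/* ...accept the IUCV path, or answer the SYN on the HS transport... */
	iucv_accept_enqueue(sk, nsk);		/* make it visible to accept() */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);			/* wake the accepting task */
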
nsk               285 net/smc/af_smc.c static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
nsk               289 net/smc/af_smc.c 	nsk->sk_type = osk->sk_type;
nsk               290 net/smc/af_smc.c 	nsk->sk_sndbuf = osk->sk_sndbuf;
nsk               291 net/smc/af_smc.c 	nsk->sk_rcvbuf = osk->sk_rcvbuf;
nsk               292 net/smc/af_smc.c 	nsk->sk_sndtimeo = osk->sk_sndtimeo;
nsk               293 net/smc/af_smc.c 	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
nsk               294 net/smc/af_smc.c 	nsk->sk_mark = osk->sk_mark;
nsk               295 net/smc/af_smc.c 	nsk->sk_priority = osk->sk_priority;
nsk               296 net/smc/af_smc.c 	nsk->sk_rcvlowat = osk->sk_rcvlowat;
nsk               297 net/smc/af_smc.c 	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
nsk               298 net/smc/af_smc.c 	nsk->sk_err = osk->sk_err;
nsk               300 net/smc/af_smc.c 	nsk->sk_flags &= ~mask;
nsk               301 net/smc/af_smc.c 	nsk->sk_flags |= osk->sk_flags & mask;
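
smc_copy_sock_settings() mirrors a fixed set of per-socket options from osk to nsk and then copies only the sk_flags bits selected by mask, leaving the other bits of nsk->sk_flags untouched; SMC uses it to keep the SMC socket and its internal TCP ("CLC") socket in sync in both directions, each direction with its own mask. The two-line masked copy at lines 300-301 is the interesting idiom; a self-contained illustration follows, with flag values invented for the demo.

	/* Stand-alone illustration of the masked flag copy at lines 300-301. */
	#include <stdio.h>

	#define FLAG_KEEPOPEN	(1UL << 0)	/* in mask: copied        */
	#define FLAG_URGINLINE	(1UL << 1)	/* in mask: copied        */
	#define FLAG_DEAD	(1UL << 2)	/* private: never copied  */

	int main(void)
	{
		unsigned long osk_flags = FLAG_KEEPOPEN | FLAG_DEAD;
		unsigned long nsk_flags = FLAG_URGINLINE;
		unsigned long mask = FLAG_KEEPOPEN | FLAG_URGINLINE;

		nsk_flags &= ~mask;		/* clear the maskable bits   */
		nsk_flags |= osk_flags & mask;	/* take them from the source */

		printf("flags=%#lx\n", nsk_flags);	/* 0x1: KEEPOPEN only */
		return 0;
	}
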
nsk              1439 net/smc/af_smc.c 	struct sock *sk = sock->sk, *nsk;
nsk              1458 net/smc/af_smc.c 	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
nsk              1478 net/smc/af_smc.c 		rc = sock_error(nsk);
nsk              1487 net/smc/af_smc.c 		if (smc_sk(nsk)->use_fallback) {
nsk              1488 net/smc/af_smc.c 			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
nsk              1494 net/smc/af_smc.c 		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
nsk              1495 net/smc/af_smc.c 			lock_sock(nsk);
nsk              1496 net/smc/af_smc.c 			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
nsk              1497 net/smc/af_smc.c 			release_sock(nsk);
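
The tail of smc_accept() (lines 1478-1497) implements a deferred-accept style wait on the freshly dequeued nsk: if the new connection fell back to plain TCP, it waits for data on the internal clcsock; otherwise it waits on the SMC receive buffer via smc_rx_wait() until bytes_to_rcv becomes non-zero. Condensed below; the wrapping option check is paraphrased since it is not part of the listed lines, and defer_accept_set/nonblocking are placeholder names.

	/* Condensed from the smc_accept() lines above; the defer-accept
	 * condition is paraphrased with placeholder names. */
	if (defer_accept_set && !nonblocking) {
		if (smc_sk(nsk)->use_fallback) {
			/* TCP fallback: wait for data on the internal socket */
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			/* native SMC: wait until the receive buffer has data */
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}
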