sk2               275 crypto/af_alg.c 	struct sock *sk2;
sk2               286 crypto/af_alg.c 	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
sk2               288 crypto/af_alg.c 	if (!sk2)
sk2               291 crypto/af_alg.c 	sock_init_data(newsock, sk2);
sk2               292 crypto/af_alg.c 	security_sock_graft(sk2, newsock);
sk2               293 crypto/af_alg.c 	security_sk_clone(sk, sk2);
sk2               295 crypto/af_alg.c 	err = type->accept(ask->private, sk2);
sk2               299 crypto/af_alg.c 		err = type->accept_nokey(ask->private, sk2);
sk2               307 crypto/af_alg.c 	alg_sk(sk2)->parent = sk;
sk2               308 crypto/af_alg.c 	alg_sk(sk2)->type = type;
sk2               309 crypto/af_alg.c 	alg_sk(sk2)->nokey_refcnt = nokey;
sk2               238 crypto/algif_hash.c 	struct sock *sk2;
sk2               256 crypto/algif_hash.c 	sk2 = newsock->sk;
sk2               257 crypto/algif_hash.c 	ask2 = alg_sk(sk2);
sk2               266 crypto/algif_hash.c 		sock_orphan(sk2);
sk2               267 crypto/algif_hash.c 		sock_put(sk2);
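
The crypto hits above are the two-socket AF_ALG model: the bound "tfm" socket carries the transform, and accept() allocates a second sock (the sk2 that af_alg_accept() grafts onto newsock) on which the actual operations run. A minimal userspace sketch of that pattern, hashing "abc" with sha256; error handling is trimmed, and any hash the kernel exposes would do in salg_name:

/* AF_ALG: bind the transform socket, accept() the operation socket. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);	/* the kernel's sk2 */

	write(opfd, "abc", 3);
	read(opfd, digest, sizeof(digest));

	for (int i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(opfd);
	close(tfmfd);
	return 0;
}
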
sk2               123 drivers/isdn/mISDN/dsp_dtmf.c 	s32 sk, sk1, sk2;
sk2               159 drivers/isdn/mISDN/dsp_dtmf.c 			sk2 = (*hfccoeff++) >> 4;
sk2               161 drivers/isdn/mISDN/dsp_dtmf.c 			if (sk > 32767 || sk < -32767 || sk2 > 32767
sk2               162 drivers/isdn/mISDN/dsp_dtmf.c 			    || sk2 < -32767)
sk2               168 drivers/isdn/mISDN/dsp_dtmf.c 				(((cos2pik[k] * sk) >> 15) * sk2) +
sk2               169 drivers/isdn/mISDN/dsp_dtmf.c 				(sk2 * sk2);
sk2               187 drivers/isdn/mISDN/dsp_dtmf.c 		sk2 = 0;
sk2               191 drivers/isdn/mISDN/dsp_dtmf.c 			sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++);
sk2               192 drivers/isdn/mISDN/dsp_dtmf.c 			sk2 = sk1;
sk2               196 drivers/isdn/mISDN/dsp_dtmf.c 		sk2 >>= 8;
sk2               197 drivers/isdn/mISDN/dsp_dtmf.c 		if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)
sk2               202 drivers/isdn/mISDN/dsp_dtmf.c 			(((cos2pik[k] * sk) >> 15) * sk2) +
sk2               203 drivers/isdn/mISDN/dsp_dtmf.c 			(sk2 * sk2);
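
The dsp_dtmf.c hits are a fixed-point Goertzel filter: sk = coeff*sk1 - sk2 + x[n], with sk2 trailing sk1 by one sample, the >>15 undoing the Q15 scaling of the cos2pik[] table, and the squared bin magnitude sk1^2 + sk2^2 - coeff*sk1*sk2 computed after the loop. A floating-point sketch of the same recurrence; the 8 kHz rate and 205-point window are common DTMF detector parameters, not necessarily the driver's (build with -lm):

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Power of one frequency bin via the Goertzel recurrence. */
static double goertzel_power(const double *x, int n, double freq, double rate)
{
	double coeff = 2.0 * cos(2.0 * M_PI * freq / rate);
	double sk = 0.0, sk1 = 0.0, sk2 = 0.0;

	for (int i = 0; i < n; i++) {
		sk  = coeff * sk1 - sk2 + x[i];	/* same shape as the driver loop */
		sk2 = sk1;
		sk1 = sk;
	}
	return sk1 * sk1 + sk2 * sk2 - coeff * sk1 * sk2;
}

int main(void)
{
	double x[205];

	for (int i = 0; i < 205; i++)		/* pure 697 Hz tone */
		x[i] = sin(2.0 * M_PI * 697.0 * i / 8000.0);
	printf("697 Hz power: %g\n", goertzel_power(x, 205, 697.0, 8000.0));
	printf("941 Hz power: %g\n", goertzel_power(x, 205, 941.0, 8000.0));
	return 0;
}
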
sk2               110 include/net/addrconf.h bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
sk2                31 include/net/sock_reuseport.h extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
sk2               151 net/core/sock_reuseport.c int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
sk2               155 net/core/sock_reuseport.c 	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
sk2               156 net/core/sock_reuseport.c 		int err = reuseport_alloc(sk2, bind_inany);
sk2               163 net/core/sock_reuseport.c 	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
sk2               272 net/core/sock_reuseport.c 	struct sock *sk2 = NULL;
sk2               292 net/core/sock_reuseport.c 			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
sk2               294 net/core/sock_reuseport.c 			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
sk2               298 net/core/sock_reuseport.c 		if (!sk2) {
sk2               309 net/core/sock_reuseport.c 			sk2 = reuse->socks[i];
sk2               315 net/core/sock_reuseport.c 	return sk2;
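
reuseport_add_sock() and reuseport_select_sock() above maintain the SO_REUSEPORT group and pick one member (the returned sk2) per packet, either by flow hash or by an attached BPF program. From userspace the group exists as soon as several sockets bind the same address with SO_REUSEPORT set beforehand; a sketch with three UDP members (port 5000 is arbitrary):

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Bind one member of a reuseport group; returns the fd or -1. */
static int make_member(unsigned short port)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
		perror("bind");
		return -1;
	}
	return fd;
}

int main(void)
{
	for (int i = 0; i < 3; i++)	/* three members, one UDP port */
		printf("member %d -> fd %d\n", i, make_member(5000));
	return 0;
}
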
sk2               710 net/decnet/dn_nsp_in.c static int dn_nsp_rx_packet(struct net *net, struct sock *sk2,
sk2               734 net/ipv4/af_inet.c 	struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
sk2               736 net/ipv4/af_inet.c 	if (!sk2)
sk2               739 net/ipv4/af_inet.c 	lock_sock(sk2);
sk2               741 net/ipv4/af_inet.c 	sock_rps_record_flow(sk2);
sk2               742 net/ipv4/af_inet.c 	WARN_ON(!((1 << sk2->sk_state) &
sk2               746 net/ipv4/af_inet.c 	sock_graft(sk2, newsock);
sk2               750 net/ipv4/af_inet.c 	release_sock(sk2);
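
inet_accept() above grafts the protocol's freshly accepted sock (sk2) onto the new struct socket and sanity-checks its state. The userspace view is simply that accept() returns a descriptor distinct from the listener's; a trivial sketch (loopback port 5001 is arbitrary, and the call blocks until a client connects):

#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(5001),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	bind(lfd, (struct sockaddr *)&a, sizeof(a));
	listen(lfd, 16);

	int cfd = accept(lfd, NULL, NULL);	/* the kernel's sk2, grafted */
	printf("listener fd %d, connection fd %d\n", lfd, cfd);
	close(cfd);
	close(lfd);
	return 0;
}
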
sk2                91 net/ipv4/inet_connection_sock.c bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
sk2                97 net/ipv4/inet_connection_sock.c 					    inet6_rcv_saddr(sk2),
sk2                99 net/ipv4/inet_connection_sock.c 					    sk2->sk_rcv_saddr,
sk2               101 net/ipv4/inet_connection_sock.c 					    ipv6_only_sock(sk2),
sk2               105 net/ipv4/inet_connection_sock.c 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
sk2               106 net/ipv4/inet_connection_sock.c 				    ipv6_only_sock(sk2), match_wildcard,
sk2               137 net/ipv4/inet_connection_sock.c 	struct sock *sk2;
sk2               149 net/ipv4/inet_connection_sock.c 	sk_for_each_bound(sk2, &tb->owners) {
sk2               150 net/ipv4/inet_connection_sock.c 		if (sk != sk2 &&
sk2               152 net/ipv4/inet_connection_sock.c 		     !sk2->sk_bound_dev_if ||
sk2               153 net/ipv4/inet_connection_sock.c 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
sk2               154 net/ipv4/inet_connection_sock.c 			if ((!reuse || !sk2->sk_reuse ||
sk2               155 net/ipv4/inet_connection_sock.c 			    sk2->sk_state == TCP_LISTEN) &&
sk2               156 net/ipv4/inet_connection_sock.c 			    (!reuseport || !sk2->sk_reuseport ||
sk2               158 net/ipv4/inet_connection_sock.c 			     (sk2->sk_state != TCP_TIME_WAIT &&
sk2               159 net/ipv4/inet_connection_sock.c 			     !uid_eq(uid, sock_i_uid(sk2))))) {
sk2               160 net/ipv4/inet_connection_sock.c 				if (inet_rcv_saddr_equal(sk, sk2, true))
sk2               163 net/ipv4/inet_connection_sock.c 			if (!relax && reuse && sk2->sk_reuse &&
sk2               164 net/ipv4/inet_connection_sock.c 			    sk2->sk_state != TCP_LISTEN) {
sk2               165 net/ipv4/inet_connection_sock.c 				if (inet_rcv_saddr_equal(sk, sk2, true))
sk2               170 net/ipv4/inet_connection_sock.c 	return sk2 != NULL;
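
The inet_csk_bind_conflict() loop above walks every current owner (sk2) of the candidate port and applies the SO_REUSEADDR/SO_REUSEPORT rules. A simplified standalone sketch of that predicate; the struct and its fields are hypothetical stand-ins, and the relax, TIME_WAIT and receive-address cases are omitted:

#include <stdbool.h>
#include <stdio.h>

struct bsock {
	int  bound_dev_if;	/* 0 = not bound to a device */
	bool reuse;		/* SO_REUSEADDR */
	bool reuseport;		/* SO_REUSEPORT */
	bool listening;		/* TCP_LISTEN */
	unsigned int uid;
};

static bool bind_conflict(const struct bsock *sk, const struct bsock *sk2)
{
	/* Sockets bound to different devices never conflict. */
	if (sk->bound_dev_if && sk2->bound_dev_if &&
	    sk->bound_dev_if != sk2->bound_dev_if)
		return false;
	/* Both set SO_REUSEADDR and the owner is not listening: allowed. */
	if (sk->reuse && sk2->reuse && !sk2->listening)
		return false;
	/* Both set SO_REUSEPORT under the same uid: allowed. */
	if (sk->reuseport && sk2->reuseport && sk->uid == sk2->uid)
		return false;
	return true;	/* same port and address: conflict */
}

int main(void)
{
	struct bsock a = { .reuseport = true, .uid = 1000 };
	struct bsock b = { .reuseport = true, .uid = 1000 };
	struct bsock c = { .uid = 1000 };

	printf("a vs b: %s\n", bind_conflict(&a, &b) ? "conflict" : "ok");
	printf("a vs c: %s\n", bind_conflict(&a, &c) ? "conflict" : "ok");
	return 0;
}
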
sk2               412 net/ipv4/inet_hashtables.c 	struct sock *sk2;
sk2               418 net/ipv4/inet_hashtables.c 	sk_nulls_for_each(sk2, node, &head->chain) {
sk2               419 net/ipv4/inet_hashtables.c 		if (sk2->sk_hash != hash)
sk2               422 net/ipv4/inet_hashtables.c 		if (likely(INET_MATCH(sk2, net, acookie,
sk2               424 net/ipv4/inet_hashtables.c 			if (sk2->sk_state == TCP_TIME_WAIT) {
sk2               425 net/ipv4/inet_hashtables.c 				tw = inet_twsk(sk2);
sk2               426 net/ipv4/inet_hashtables.c 				if (twsk_unique(sk, sk2, twp))
sk2               520 net/ipv4/inet_hashtables.c 	struct sock *sk2;
sk2               523 net/ipv4/inet_hashtables.c 	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
sk2               524 net/ipv4/inet_hashtables.c 		if (sk2 != sk &&
sk2               525 net/ipv4/inet_hashtables.c 		    sk2->sk_family == sk->sk_family &&
sk2               526 net/ipv4/inet_hashtables.c 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2               527 net/ipv4/inet_hashtables.c 		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
sk2               528 net/ipv4/inet_hashtables.c 		    inet_csk(sk2)->icsk_bind_hash == tb &&
sk2               529 net/ipv4/inet_hashtables.c 		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
sk2               530 net/ipv4/inet_hashtables.c 		    inet_rcv_saddr_equal(sk, sk2, false))
sk2               531 net/ipv4/inet_hashtables.c 			return reuseport_add_sock(sk, sk2,
sk2                34 net/ipv4/netfilter/nf_tproxy_ipv4.c 		struct sock *sk2;
sk2                36 net/ipv4/netfilter/nf_tproxy_ipv4.c 		sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
sk2                40 net/ipv4/netfilter/nf_tproxy_ipv4.c 		if (sk2) {
sk2                42 net/ipv4/netfilter/nf_tproxy_ipv4.c 			sk = sk2;
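
The nf_tproxy_ipv4.c hit swaps a TIME_WAIT socket for a live listener (sk2) while a TPROXY rule intercepts traffic. The userspace half of such a setup is a listener with IP_TRANSPARENT set, so it may accept connections addressed to non-local destinations; a sketch (port 3128 is arbitrary, and the option needs CAP_NET_ADMIN):

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_TRANSPARENT
#define IP_TRANSPARENT 19
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(3128),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	if (setsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &one, sizeof(one)) < 0)
		perror("IP_TRANSPARENT (needs CAP_NET_ADMIN)");
	bind(fd, (struct sockaddr *)&a, sizeof(a));
	listen(fd, 16);
	printf("transparent listener on fd %d\n", fd);
	return 0;
}
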
sk2                82 net/ipv4/ping.c 	struct sock *sk2 = NULL;
sk2                95 net/ipv4/ping.c 			ping_portaddr_for_each_entry(sk2, node, hlist) {
sk2                96 net/ipv4/ping.c 				isk2 = inet_sk(sk2);
sk2               112 net/ipv4/ping.c 		ping_portaddr_for_each_entry(sk2, node, hlist) {
sk2               113 net/ipv4/ping.c 			isk2 = inet_sk(sk2);
sk2               120 net/ipv4/ping.c 			    (sk2 != sk) &&
sk2               121 net/ipv4/ping.c 			    (!sk2->sk_reuse || !sk->sk_reuse))
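
ping_get_port() above allocates an ICMP echo identifier the way port allocators do, probing each hashed entry (sk2) for collisions. The userspace counterpart is the unprivileged ping socket; a sketch sending one echo request to loopback, assuming net.ipv4.ping_group_range covers the caller's gid (the kernel replaces the echo id with the allocated one and fills in the checksum):

#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct icmphdr icmp = {
		.type = ICMP_ECHO,	/* id and checksum filled by the kernel */
		.un.echo.sequence = htons(1),
	};

	if (fd < 0) {
		perror("ping socket (check net.ipv4.ping_group_range)");
		return 1;
	}
	sendto(fd, &icmp, sizeof(icmp), 0,
	       (struct sockaddr *)&dst, sizeof(dst));
	printf("echo request sent on fd %d\n", fd);
	close(fd);
	return 0;
}
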
sk2              2001 net/ipv4/tcp_ipv4.c 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
sk2              2008 net/ipv4/tcp_ipv4.c 		if (sk2) {
sk2              2010 net/ipv4/tcp_ipv4.c 			sk = sk2;
sk2               133 net/ipv4/udp.c 	struct sock *sk2;
sk2               136 net/ipv4/udp.c 	sk_for_each(sk2, &hslot->head) {
sk2               137 net/ipv4/udp.c 		if (net_eq(sock_net(sk2), net) &&
sk2               138 net/ipv4/udp.c 		    sk2 != sk &&
sk2               139 net/ipv4/udp.c 		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
sk2               140 net/ipv4/udp.c 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
sk2               141 net/ipv4/udp.c 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2               142 net/ipv4/udp.c 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2               143 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, true)) {
sk2               144 net/ipv4/udp.c 			if (sk2->sk_reuseport && sk->sk_reuseport &&
sk2               146 net/ipv4/udp.c 			    uid_eq(uid, sock_i_uid(sk2))) {
sk2               152 net/ipv4/udp.c 				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
sk2               168 net/ipv4/udp.c 	struct sock *sk2;
sk2               173 net/ipv4/udp.c 	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
sk2               174 net/ipv4/udp.c 		if (net_eq(sock_net(sk2), net) &&
sk2               175 net/ipv4/udp.c 		    sk2 != sk &&
sk2               176 net/ipv4/udp.c 		    (udp_sk(sk2)->udp_port_hash == num) &&
sk2               177 net/ipv4/udp.c 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
sk2               178 net/ipv4/udp.c 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2               179 net/ipv4/udp.c 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2               180 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, true)) {
sk2               181 net/ipv4/udp.c 			if (sk2->sk_reuseport && sk->sk_reuseport &&
sk2               183 net/ipv4/udp.c 			    uid_eq(uid, sock_i_uid(sk2))) {
sk2               199 net/ipv4/udp.c 	struct sock *sk2;
sk2               201 net/ipv4/udp.c 	sk_for_each(sk2, &hslot->head) {
sk2               202 net/ipv4/udp.c 		if (net_eq(sock_net(sk2), net) &&
sk2               203 net/ipv4/udp.c 		    sk2 != sk &&
sk2               204 net/ipv4/udp.c 		    sk2->sk_family == sk->sk_family &&
sk2               205 net/ipv4/udp.c 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2               206 net/ipv4/udp.c 		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
sk2               207 net/ipv4/udp.c 		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2               208 net/ipv4/udp.c 		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
sk2               209 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, false)) {
sk2               210 net/ipv4/udp.c 			return reuseport_add_sock(sk, sk2,
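
udp_lib_lport_inuse() and udp_lib_lport_inuse2() above apply the same conflict matrix to UDP, and udp_reuseport_add_sock() attaches compatible sockets (sk2) to a reuseport group. A quick userspace check of the visible behaviour; ports 5002/5003 are arbitrary and the leaked fds are fine for a demo:

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Bind a UDP socket to the port, optionally with SO_REUSEPORT. */
static int bind_udp(unsigned short port, int reuseport)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	if (reuseport)
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT,
			   &reuseport, sizeof(reuseport));
	return bind(fd, (struct sockaddr *)&a, sizeof(a));
}

int main(void)
{
	int r1 = bind_udp(5002, 0);
	int r2 = bind_udp(5002, 0);	/* conflicts: EADDRINUSE */
	int r3 = bind_udp(5003, 1);
	int r4 = bind_udp(5003, 1);	/* same group: allowed */

	printf("plain:     %d then %d\n", r1, r2);
	printf("reuseport: %d then %d\n", r3, r4);
	return 0;
}
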
sk2               216 net/ipv6/inet6_hashtables.c 	struct sock *sk2;
sk2               222 net/ipv6/inet6_hashtables.c 	sk_nulls_for_each(sk2, node, &head->chain) {
sk2               223 net/ipv6/inet6_hashtables.c 		if (sk2->sk_hash != hash)
sk2               226 net/ipv6/inet6_hashtables.c 		if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports,
sk2               228 net/ipv6/inet6_hashtables.c 			if (sk2->sk_state == TCP_TIME_WAIT) {
sk2               229 net/ipv6/inet6_hashtables.c 				tw = inet_twsk(sk2);
sk2               230 net/ipv6/inet6_hashtables.c 				if (twsk_unique(sk, sk2, twp))
sk2                57 net/ipv6/netfilter/nf_tproxy_ipv6.c 		struct sock *sk2;
sk2                59 net/ipv6/netfilter/nf_tproxy_ipv6.c 		sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
sk2                65 net/ipv6/netfilter/nf_tproxy_ipv6.c 		if (sk2) {
sk2                67 net/ipv6/netfilter/nf_tproxy_ipv6.c 			sk = sk2;
sk2              1665 net/ipv6/tcp_ipv6.c 		struct sock *sk2;
sk2              1667 net/ipv6/tcp_ipv6.c 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
sk2              1674 net/ipv6/tcp_ipv6.c 		if (sk2) {
sk2              1677 net/ipv6/tcp_ipv6.c 			sk = sk2;
sk2               739 net/sctp/input.c 			struct sock *sk2 = epb2->sk;
sk2               741 net/sctp/input.c 			if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
sk2               742 net/sctp/input.c 			    !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
sk2               743 net/sctp/input.c 			    !sk2->sk_reuseport)
sk2               746 net/sctp/input.c 			err = sctp_bind_addrs_check(sctp_sk(sk2),
sk2               749 net/sctp/input.c 				err = reuseport_add_sock(sk, sk2, any);
sk2              8242 net/sctp/socket.c 		struct sock *sk2;
sk2              8262 net/sctp/socket.c 		sk_for_each_bound(sk2, &pp->owner) {
sk2              8263 net/sctp/socket.c 			struct sctp_sock *sp2 = sctp_sk(sk2);
sk2              8266 net/sctp/socket.c 			if (sk == sk2 ||
sk2              8267 net/sctp/socket.c 			    (reuse && (sk2->sk_reuse || sp2->reuse) &&
sk2              8268 net/sctp/socket.c 			     sk2->sk_state != SCTP_SS_LISTENING) ||
sk2              8269 net/sctp/socket.c 			    (sk->sk_reuseport && sk2->sk_reuseport &&
sk2              8270 net/sctp/socket.c 			     uid_eq(uid, sock_i_uid(sk2))))
sk2              1086 net/unix/af_unix.c static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
sk2              1088 net/unix/af_unix.c 	if (unlikely(sk1 == sk2) || !sk2) {
sk2              1092 net/unix/af_unix.c 	if (sk1 < sk2) {
sk2              1094 net/unix/af_unix.c 		unix_state_lock_nested(sk2);
sk2              1096 net/unix/af_unix.c 		unix_state_lock(sk2);
sk2              1101 net/unix/af_unix.c static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
sk2              1103 net/unix/af_unix.c 	if (unlikely(sk1 == sk2) || !sk2) {
sk2              1108 net/unix/af_unix.c 	unix_state_unlock(sk2);
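
unix_state_double_lock() and unix_state_double_unlock() above avoid AB-BA deadlock by always taking the lower-addressed sock's lock first, and taking only one lock when the two ends coincide or the peer is missing. A standalone pthread sketch of the same address-ordering trick; the pointer comparison mirrors the kernel's sk1 < sk2 check:

#include <pthread.h>
#include <stdio.h>

struct psock {
	pthread_mutex_t lock;
};

static void double_lock(struct psock *a, struct psock *b)
{
	if (a == b || !b) {		/* same or missing peer: one lock */
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a < b) {			/* lower address locks first */
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void double_unlock(struct psock *a, struct psock *b)
{
	if (a == b || !b) {
		pthread_mutex_unlock(&a->lock);
		return;
	}
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct psock x = { PTHREAD_MUTEX_INITIALIZER };
	struct psock y = { PTHREAD_MUTEX_INITIALIZER };

	double_lock(&x, &y);
	puts("pair locked in address order");
	double_unlock(&x, &y);
	return 0;
}
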